ZTWHHH committed on
Commit
1fdac67
·
verified ·
1 Parent(s): 2339b66

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc +0 -0
  2. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc +0 -0
  3. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc +0 -0
  4. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/feature_extraction.cpython-310.pyc +0 -0
  5. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/inception.cpython-310.pyc +0 -0
  6. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc +0 -0
  7. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mnasnet.cpython-310.pyc +0 -0
  8. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenet.cpython-310.pyc +0 -0
  9. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv3.cpython-310.pyc +0 -0
  10. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc +0 -0
  11. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/resnet.cpython-310.pyc +0 -0
  12. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/shufflenetv2.cpython-310.pyc +0 -0
  13. pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/vision_transformer.cpython-310.pyc +0 -0
  14. pllava/lib/python3.10/site-packages/torchvision/models/_api.py +277 -0
  15. pllava/lib/python3.10/site-packages/torchvision/models/_meta.py +1554 -0
  16. pllava/lib/python3.10/site-packages/torchvision/models/feature_extraction.py +572 -0
  17. pllava/lib/python3.10/site-packages/torchvision/models/inception.py +478 -0
  18. pllava/lib/python3.10/site-packages/torchvision/models/maxvit.py +833 -0
  19. pllava/lib/python3.10/site-packages/torchvision/models/mnasnet.py +434 -0
  20. pllava/lib/python3.10/site-packages/torchvision/models/mobilenet.py +6 -0
  21. pllava/lib/python3.10/site-packages/torchvision/models/mobilenetv3.py +423 -0
  22. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__init__.py +1 -0
  23. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/__init__.cpython-310.pyc +0 -0
  24. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/_utils.cpython-310.pyc +0 -0
  25. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/raft.cpython-310.pyc +0 -0
  26. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/_utils.py +48 -0
  27. pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/raft.py +947 -0
  28. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__init__.py +5 -0
  29. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  30. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/googlenet.cpython-310.pyc +0 -0
  31. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc +0 -0
  32. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenet.cpython-310.pyc +0 -0
  33. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv2.cpython-310.pyc +0 -0
  34. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv3.cpython-310.pyc +0 -0
  35. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/resnet.cpython-310.pyc +0 -0
  36. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/shufflenetv2.cpython-310.pyc +0 -0
  37. pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  38. pllava/lib/python3.10/site-packages/torchvision/models/quantization/googlenet.py +210 -0
  39. pllava/lib/python3.10/site-packages/torchvision/models/quantization/inception.py +273 -0
  40. pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenet.py +6 -0
  41. pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv2.py +154 -0
  42. pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv3.py +237 -0
  43. pllava/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py +484 -0
  44. pllava/lib/python3.10/site-packages/torchvision/models/quantization/shufflenetv2.py +427 -0
  45. pllava/lib/python3.10/site-packages/torchvision/models/quantization/utils.py +51 -0
  46. pllava/lib/python3.10/site-packages/torchvision/models/regnet.py +1571 -0
  47. pllava/lib/python3.10/site-packages/torchvision/models/resnet.py +985 -0
  48. pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__init__.py +3 -0
  49. pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__pycache__/__init__.cpython-310.pyc +0 -0
  50. pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__pycache__/_utils.cpython-310.pyc +0 -0
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc ADDED
Binary file (19.4 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/densenet.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc ADDED
Binary file (29.2 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/feature_extraction.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/inception.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/maxvit.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mnasnet.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenet.cpython-310.pyc ADDED
Binary file (294 Bytes). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv3.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/regnet.cpython-310.pyc ADDED
Binary file (37.3 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/resnet.cpython-310.pyc ADDED
Binary file (25.3 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/shufflenetv2.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/__pycache__/vision_transformer.cpython-310.pyc ADDED
Binary file (21.1 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/_api.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fnmatch
2
+ import importlib
3
+ import inspect
4
+ import sys
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+ from functools import partial
8
+ from inspect import signature
9
+ from types import ModuleType
10
+ from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Type, TypeVar, Union
11
+
12
+ from torch import nn
13
+
14
+ from .._internally_replaced_utils import load_state_dict_from_url
15
+
16
+
17
+ __all__ = ["WeightsEnum", "Weights", "get_model", "get_model_builder", "get_model_weights", "get_weight", "list_models"]
18
+
19
+
20
@dataclass
class Weights:
    """
    This class is used to group important attributes associated with the pre-trained weights.

    Args:
        url (str): The location where we find the weights.
        transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)
            needed to use the model. A constructor is stored instead of a ready-made object so that any memory the
            transform needs is only allocated when it is actually used.
        meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be
            informative attributes (for example the number of parameters/flops, recipe link/methods used in training
            etc), configuration parameters (for example the `num_classes`) needed to construct the model or important
            meta-data (for example the `classes` of a classification model) needed to use the model.
    """

    url: str
    transforms: Callable
    meta: Dict[str, Any]

    def __eq__(self, other: Any) -> bool:
        # Custom equality is required for correct deep-copy and deserialization behavior: recreating an enum
        # member (e.g. via deepcopy/pickle) performs an equality check against the defined members, and the
        # `transforms` attribute is frequently a `functools.partial`, for which `deepcopy(fn) != fn`. Comparing
        # the partial's components keeps that check working.
        # See https://github.com/pytorch/vision/pull/7107 for details.
        if not isinstance(other, Weights):
            return NotImplemented

        if self.url != other.url or self.meta != other.meta:
            return False

        mine, theirs = self.transforms, other.transforms
        if isinstance(mine, partial) and isinstance(theirs, partial):
            return mine.func == theirs.func and mine.args == theirs.args and mine.keywords == theirs.keywords
        return mine == theirs
66
+
67
+
68
class WeightsEnum(Enum):
    """
    This class is the parent class of all model weights. Each model building method receives an optional `weights`
    parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type
    `Weights`.

    Args:
        value (Weights): The data class entry with the weight information.
    """

    @classmethod
    def verify(cls, obj: Any) -> Any:
        # Accept None (no pre-trained weights), an enum member, or the member's string name.
        if obj is None:
            return None
        if type(obj) is str:
            # Support both "MEMBER" and "EnumName.MEMBER" spellings.
            return cls[obj.replace(cls.__name__ + ".", "")]
        if not isinstance(obj, cls):
            raise TypeError(
                f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}."
            )
        return obj

    def get_state_dict(self, *args: Any, **kwargs: Any) -> Mapping[str, Any]:
        # Fetch (or load from cache) the checkpoint associated with this weight entry.
        return load_state_dict_from_url(self.url, *args, **kwargs)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}.{self._name_}"

    @property
    def url(self):
        return self.value.url

    @property
    def transforms(self):
        return self.value.transforms

    @property
    def meta(self):
        return self.value.meta
106
+
107
+
108
def get_weight(name: str) -> WeightsEnum:
    """
    Gets the weights enum value by its full name. Example: "ResNet50_Weights.IMAGENET1K_V1"

    Args:
        name (str): The name of the weight enum entry.

    Returns:
        WeightsEnum: The requested weight enum.
    """
    try:
        enum_name, value_name = name.split(".")
    except ValueError:
        raise ValueError(f"Invalid weight name provided: '{name}'.")

    # Search the parent `models` package plus every sub-package it exposes.
    base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
    base_module = importlib.import_module(base_module_name)
    candidate_modules = [base_module]
    for _, member in inspect.getmembers(base_module, inspect.ismodule):
        # Packages (as opposed to plain modules) are identified by their __init__.py file.
        if member.__file__.endswith("__init__.py"):  # type: ignore[union-attr]
            candidate_modules.append(member)

    weights_enum = None
    for candidate in candidate_modules:
        enum_class = candidate.__dict__.get(enum_name, None)
        if enum_class is not None and issubclass(enum_class, WeightsEnum):
            weights_enum = enum_class
            break

    if weights_enum is None:
        raise ValueError(f"The weight enum '{enum_name}' for the specific method couldn't be retrieved.")

    return weights_enum[value_name]
142
+
143
+
144
def get_model_weights(name: Union[Callable, str]) -> Type[WeightsEnum]:
    """
    Returns the weights enum class associated to the given model.

    Args:
        name (callable or str): The model builder function or the name under which it is registered.

    Returns:
        weights_enum (WeightsEnum): The weights enum class associated with the model.
    """
    # Resolve a registry name to its builder; callables are used as-is.
    if isinstance(name, str):
        builder = get_model_builder(name)
    else:
        builder = name
    return _get_enum_from_fn(builder)
156
+
157
+
158
def _get_enum_from_fn(fn: Callable) -> Type[WeightsEnum]:
    """
    Internal method that gets the weight enum of a specific model builder method.

    Args:
        fn (Callable): The builder method used to create the model.
    Returns:
        WeightsEnum: The requested weight enum.
    """
    sig = signature(fn)
    if "weights" not in sig.parameters:
        raise ValueError("The method is missing the 'weights' argument.")

    # Reuse the already-computed signature instead of recomputing it.
    ann = sig.parameters["weights"].annotation
    weights_enum = None
    if isinstance(ann, type) and issubclass(ann, WeightsEnum):
        weights_enum = ann
    else:
        # Handle cases like Optional[T] / Union[..., T]. Using getattr with a default also keeps
        # annotations without __args__ (e.g. a missing annotation) on the ValueError path below
        # instead of raising an AttributeError.
        for t in getattr(ann, "__args__", ()):
            if isinstance(t, type) and issubclass(t, WeightsEnum):
                weights_enum = t
                break

    if weights_enum is None:
        raise ValueError(
            "The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct."
        )

    return weights_enum
189
+
190
+
191
# Type variable bound to nn.Module so the decorator preserves the builder's return type.
M = TypeVar("M", bound=nn.Module)

# Registry of model builder functions, keyed by the name they were registered under.
BUILTIN_MODELS = {}


def register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:
    """Decorator that records a model builder function in ``BUILTIN_MODELS``.

    Args:
        name (str, optional): Registry key; defaults to the decorated function's ``__name__``.
    """

    def wrapper(fn: Callable[..., M]) -> Callable[..., M]:
        registry_key = fn.__name__ if name is None else name
        if registry_key in BUILTIN_MODELS:
            raise ValueError(f"An entry is already registered under the name '{registry_key}'.")
        BUILTIN_MODELS[registry_key] = fn
        return fn

    return wrapper
205
+
206
+
207
def list_models(
    module: Optional[ModuleType] = None,
    include: Union[Iterable[str], str, None] = None,
    exclude: Union[Iterable[str], str, None] = None,
) -> List[str]:
    """
    Returns a list with the names of registered models.

    Args:
        module (ModuleType, optional): The module from which we want to extract the available models.
        include (str or Iterable[str], optional): Filter(s) for including the models from the set of all models.
            Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix
            shell-style wildcards. In case of many filters, the result is the union of individual filters.
        exclude (str or Iterable[str], optional): Filter(s) applied after include filters to remove models.
            Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix
            shell-style wildcards. In case of many filters, the result is removal of all the models that match
            any individual filter.

    Returns:
        models (list): A list with the names of available models.
    """
    # Restrict to builders whose defining package matches `module`, when one is given.
    all_models = {
        model_name
        for model_name, builder in BUILTIN_MODELS.items()
        if module is None or builder.__module__.rsplit(".", 1)[0] == module.__name__
    }

    if include:
        include_patterns = [include] if isinstance(include, str) else include
        models: Set[str] = set()
        for pattern in include_patterns:
            models |= set(fnmatch.filter(all_models, pattern))
    else:
        models = all_models

    if exclude:
        exclude_patterns = [exclude] if isinstance(exclude, str) else exclude
        for pattern in exclude_patterns:
            models -= set(fnmatch.filter(all_models, pattern))
    return sorted(models)
245
+
246
+
247
def get_model_builder(name: str) -> Callable[..., nn.Module]:
    """
    Gets the model name and returns the model builder method.

    Args:
        name (str): The name under which the model is registered.

    Returns:
        fn (Callable): The model builder method.

    Raises:
        ValueError: If no model is registered under the given name.
    """
    # Model names are matched case-insensitively.
    name = name.lower()
    if name not in BUILTIN_MODELS:
        raise ValueError(f"Unknown model {name}")
    return BUILTIN_MODELS[name]
263
+
264
+
265
def get_model(name: str, **config: Any) -> nn.Module:
    """
    Gets the model name and configuration and returns an instantiated model.

    Args:
        name (str): The name under which the model is registered.
        **config (Any): parameters passed to the model builder method.

    Returns:
        model (nn.Module): The initialized model.
    """
    builder = get_model_builder(name)
    return builder(**config)
pllava/lib/python3.10/site-packages/torchvision/models/_meta.py ADDED
@@ -0,0 +1,1554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is part of the private API. Please do not refer to any variables defined here directly as they will be
3
+ removed on future versions without warning.
4
+ """
5
+
6
+ # This will eventually be replaced with a call at torchvision.datasets.info("imagenet").categories
7
+ _IMAGENET_CATEGORIES = [
8
+ "tench",
9
+ "goldfish",
10
+ "great white shark",
11
+ "tiger shark",
12
+ "hammerhead",
13
+ "electric ray",
14
+ "stingray",
15
+ "cock",
16
+ "hen",
17
+ "ostrich",
18
+ "brambling",
19
+ "goldfinch",
20
+ "house finch",
21
+ "junco",
22
+ "indigo bunting",
23
+ "robin",
24
+ "bulbul",
25
+ "jay",
26
+ "magpie",
27
+ "chickadee",
28
+ "water ouzel",
29
+ "kite",
30
+ "bald eagle",
31
+ "vulture",
32
+ "great grey owl",
33
+ "European fire salamander",
34
+ "common newt",
35
+ "eft",
36
+ "spotted salamander",
37
+ "axolotl",
38
+ "bullfrog",
39
+ "tree frog",
40
+ "tailed frog",
41
+ "loggerhead",
42
+ "leatherback turtle",
43
+ "mud turtle",
44
+ "terrapin",
45
+ "box turtle",
46
+ "banded gecko",
47
+ "common iguana",
48
+ "American chameleon",
49
+ "whiptail",
50
+ "agama",
51
+ "frilled lizard",
52
+ "alligator lizard",
53
+ "Gila monster",
54
+ "green lizard",
55
+ "African chameleon",
56
+ "Komodo dragon",
57
+ "African crocodile",
58
+ "American alligator",
59
+ "triceratops",
60
+ "thunder snake",
61
+ "ringneck snake",
62
+ "hognose snake",
63
+ "green snake",
64
+ "king snake",
65
+ "garter snake",
66
+ "water snake",
67
+ "vine snake",
68
+ "night snake",
69
+ "boa constrictor",
70
+ "rock python",
71
+ "Indian cobra",
72
+ "green mamba",
73
+ "sea snake",
74
+ "horned viper",
75
+ "diamondback",
76
+ "sidewinder",
77
+ "trilobite",
78
+ "harvestman",
79
+ "scorpion",
80
+ "black and gold garden spider",
81
+ "barn spider",
82
+ "garden spider",
83
+ "black widow",
84
+ "tarantula",
85
+ "wolf spider",
86
+ "tick",
87
+ "centipede",
88
+ "black grouse",
89
+ "ptarmigan",
90
+ "ruffed grouse",
91
+ "prairie chicken",
92
+ "peacock",
93
+ "quail",
94
+ "partridge",
95
+ "African grey",
96
+ "macaw",
97
+ "sulphur-crested cockatoo",
98
+ "lorikeet",
99
+ "coucal",
100
+ "bee eater",
101
+ "hornbill",
102
+ "hummingbird",
103
+ "jacamar",
104
+ "toucan",
105
+ "drake",
106
+ "red-breasted merganser",
107
+ "goose",
108
+ "black swan",
109
+ "tusker",
110
+ "echidna",
111
+ "platypus",
112
+ "wallaby",
113
+ "koala",
114
+ "wombat",
115
+ "jellyfish",
116
+ "sea anemone",
117
+ "brain coral",
118
+ "flatworm",
119
+ "nematode",
120
+ "conch",
121
+ "snail",
122
+ "slug",
123
+ "sea slug",
124
+ "chiton",
125
+ "chambered nautilus",
126
+ "Dungeness crab",
127
+ "rock crab",
128
+ "fiddler crab",
129
+ "king crab",
130
+ "American lobster",
131
+ "spiny lobster",
132
+ "crayfish",
133
+ "hermit crab",
134
+ "isopod",
135
+ "white stork",
136
+ "black stork",
137
+ "spoonbill",
138
+ "flamingo",
139
+ "little blue heron",
140
+ "American egret",
141
+ "bittern",
142
+ "crane bird",
143
+ "limpkin",
144
+ "European gallinule",
145
+ "American coot",
146
+ "bustard",
147
+ "ruddy turnstone",
148
+ "red-backed sandpiper",
149
+ "redshank",
150
+ "dowitcher",
151
+ "oystercatcher",
152
+ "pelican",
153
+ "king penguin",
154
+ "albatross",
155
+ "grey whale",
156
+ "killer whale",
157
+ "dugong",
158
+ "sea lion",
159
+ "Chihuahua",
160
+ "Japanese spaniel",
161
+ "Maltese dog",
162
+ "Pekinese",
163
+ "Shih-Tzu",
164
+ "Blenheim spaniel",
165
+ "papillon",
166
+ "toy terrier",
167
+ "Rhodesian ridgeback",
168
+ "Afghan hound",
169
+ "basset",
170
+ "beagle",
171
+ "bloodhound",
172
+ "bluetick",
173
+ "black-and-tan coonhound",
174
+ "Walker hound",
175
+ "English foxhound",
176
+ "redbone",
177
+ "borzoi",
178
+ "Irish wolfhound",
179
+ "Italian greyhound",
180
+ "whippet",
181
+ "Ibizan hound",
182
+ "Norwegian elkhound",
183
+ "otterhound",
184
+ "Saluki",
185
+ "Scottish deerhound",
186
+ "Weimaraner",
187
+ "Staffordshire bullterrier",
188
+ "American Staffordshire terrier",
189
+ "Bedlington terrier",
190
+ "Border terrier",
191
+ "Kerry blue terrier",
192
+ "Irish terrier",
193
+ "Norfolk terrier",
194
+ "Norwich terrier",
195
+ "Yorkshire terrier",
196
+ "wire-haired fox terrier",
197
+ "Lakeland terrier",
198
+ "Sealyham terrier",
199
+ "Airedale",
200
+ "cairn",
201
+ "Australian terrier",
202
+ "Dandie Dinmont",
203
+ "Boston bull",
204
+ "miniature schnauzer",
205
+ "giant schnauzer",
206
+ "standard schnauzer",
207
+ "Scotch terrier",
208
+ "Tibetan terrier",
209
+ "silky terrier",
210
+ "soft-coated wheaten terrier",
211
+ "West Highland white terrier",
212
+ "Lhasa",
213
+ "flat-coated retriever",
214
+ "curly-coated retriever",
215
+ "golden retriever",
216
+ "Labrador retriever",
217
+ "Chesapeake Bay retriever",
218
+ "German short-haired pointer",
219
+ "vizsla",
220
+ "English setter",
221
+ "Irish setter",
222
+ "Gordon setter",
223
+ "Brittany spaniel",
224
+ "clumber",
225
+ "English springer",
226
+ "Welsh springer spaniel",
227
+ "cocker spaniel",
228
+ "Sussex spaniel",
229
+ "Irish water spaniel",
230
+ "kuvasz",
231
+ "schipperke",
232
+ "groenendael",
233
+ "malinois",
234
+ "briard",
235
+ "kelpie",
236
+ "komondor",
237
+ "Old English sheepdog",
238
+ "Shetland sheepdog",
239
+ "collie",
240
+ "Border collie",
241
+ "Bouvier des Flandres",
242
+ "Rottweiler",
243
+ "German shepherd",
244
+ "Doberman",
245
+ "miniature pinscher",
246
+ "Greater Swiss Mountain dog",
247
+ "Bernese mountain dog",
248
+ "Appenzeller",
249
+ "EntleBucher",
250
+ "boxer",
251
+ "bull mastiff",
252
+ "Tibetan mastiff",
253
+ "French bulldog",
254
+ "Great Dane",
255
+ "Saint Bernard",
256
+ "Eskimo dog",
257
+ "malamute",
258
+ "Siberian husky",
259
+ "dalmatian",
260
+ "affenpinscher",
261
+ "basenji",
262
+ "pug",
263
+ "Leonberg",
264
+ "Newfoundland",
265
+ "Great Pyrenees",
266
+ "Samoyed",
267
+ "Pomeranian",
268
+ "chow",
269
+ "keeshond",
270
+ "Brabancon griffon",
271
+ "Pembroke",
272
+ "Cardigan",
273
+ "toy poodle",
274
+ "miniature poodle",
275
+ "standard poodle",
276
+ "Mexican hairless",
277
+ "timber wolf",
278
+ "white wolf",
279
+ "red wolf",
280
+ "coyote",
281
+ "dingo",
282
+ "dhole",
283
+ "African hunting dog",
284
+ "hyena",
285
+ "red fox",
286
+ "kit fox",
287
+ "Arctic fox",
288
+ "grey fox",
289
+ "tabby",
290
+ "tiger cat",
291
+ "Persian cat",
292
+ "Siamese cat",
293
+ "Egyptian cat",
294
+ "cougar",
295
+ "lynx",
296
+ "leopard",
297
+ "snow leopard",
298
+ "jaguar",
299
+ "lion",
300
+ "tiger",
301
+ "cheetah",
302
+ "brown bear",
303
+ "American black bear",
304
+ "ice bear",
305
+ "sloth bear",
306
+ "mongoose",
307
+ "meerkat",
308
+ "tiger beetle",
309
+ "ladybug",
310
+ "ground beetle",
311
+ "long-horned beetle",
312
+ "leaf beetle",
313
+ "dung beetle",
314
+ "rhinoceros beetle",
315
+ "weevil",
316
+ "fly",
317
+ "bee",
318
+ "ant",
319
+ "grasshopper",
320
+ "cricket",
321
+ "walking stick",
322
+ "cockroach",
323
+ "mantis",
324
+ "cicada",
325
+ "leafhopper",
326
+ "lacewing",
327
+ "dragonfly",
328
+ "damselfly",
329
+ "admiral",
330
+ "ringlet",
331
+ "monarch",
332
+ "cabbage butterfly",
333
+ "sulphur butterfly",
334
+ "lycaenid",
335
+ "starfish",
336
+ "sea urchin",
337
+ "sea cucumber",
338
+ "wood rabbit",
339
+ "hare",
340
+ "Angora",
341
+ "hamster",
342
+ "porcupine",
343
+ "fox squirrel",
344
+ "marmot",
345
+ "beaver",
346
+ "guinea pig",
347
+ "sorrel",
348
+ "zebra",
349
+ "hog",
350
+ "wild boar",
351
+ "warthog",
352
+ "hippopotamus",
353
+ "ox",
354
+ "water buffalo",
355
+ "bison",
356
+ "ram",
357
+ "bighorn",
358
+ "ibex",
359
+ "hartebeest",
360
+ "impala",
361
+ "gazelle",
362
+ "Arabian camel",
363
+ "llama",
364
+ "weasel",
365
+ "mink",
366
+ "polecat",
367
+ "black-footed ferret",
368
+ "otter",
369
+ "skunk",
370
+ "badger",
371
+ "armadillo",
372
+ "three-toed sloth",
373
+ "orangutan",
374
+ "gorilla",
375
+ "chimpanzee",
376
+ "gibbon",
377
+ "siamang",
378
+ "guenon",
379
+ "patas",
380
+ "baboon",
381
+ "macaque",
382
+ "langur",
383
+ "colobus",
384
+ "proboscis monkey",
385
+ "marmoset",
386
+ "capuchin",
387
+ "howler monkey",
388
+ "titi",
389
+ "spider monkey",
390
+ "squirrel monkey",
391
+ "Madagascar cat",
392
+ "indri",
393
+ "Indian elephant",
394
+ "African elephant",
395
+ "lesser panda",
396
+ "giant panda",
397
+ "barracouta",
398
+ "eel",
399
+ "coho",
400
+ "rock beauty",
401
+ "anemone fish",
402
+ "sturgeon",
403
+ "gar",
404
+ "lionfish",
405
+ "puffer",
406
+ "abacus",
407
+ "abaya",
408
+ "academic gown",
409
+ "accordion",
410
+ "acoustic guitar",
411
+ "aircraft carrier",
412
+ "airliner",
413
+ "airship",
414
+ "altar",
415
+ "ambulance",
416
+ "amphibian",
417
+ "analog clock",
418
+ "apiary",
419
+ "apron",
420
+ "ashcan",
421
+ "assault rifle",
422
+ "backpack",
423
+ "bakery",
424
+ "balance beam",
425
+ "balloon",
426
+ "ballpoint",
427
+ "Band Aid",
428
+ "banjo",
429
+ "bannister",
430
+ "barbell",
431
+ "barber chair",
432
+ "barbershop",
433
+ "barn",
434
+ "barometer",
435
+ "barrel",
436
+ "barrow",
437
+ "baseball",
438
+ "basketball",
439
+ "bassinet",
440
+ "bassoon",
441
+ "bathing cap",
442
+ "bath towel",
443
+ "bathtub",
444
+ "beach wagon",
445
+ "beacon",
446
+ "beaker",
447
+ "bearskin",
448
+ "beer bottle",
449
+ "beer glass",
450
+ "bell cote",
451
+ "bib",
452
+ "bicycle-built-for-two",
453
+ "bikini",
454
+ "binder",
455
+ "binoculars",
456
+ "birdhouse",
457
+ "boathouse",
458
+ "bobsled",
459
+ "bolo tie",
460
+ "bonnet",
461
+ "bookcase",
462
+ "bookshop",
463
+ "bottlecap",
464
+ "bow",
465
+ "bow tie",
466
+ "brass",
467
+ "brassiere",
468
+ "breakwater",
469
+ "breastplate",
470
+ "broom",
471
+ "bucket",
472
+ "buckle",
473
+ "bulletproof vest",
474
+ "bullet train",
475
+ "butcher shop",
476
+ "cab",
477
+ "caldron",
478
+ "candle",
479
+ "cannon",
480
+ "canoe",
481
+ "can opener",
482
+ "cardigan",
483
+ "car mirror",
484
+ "carousel",
485
+ "carpenter's kit",
486
+ "carton",
487
+ "car wheel",
488
+ "cash machine",
489
+ "cassette",
490
+ "cassette player",
491
+ "castle",
492
+ "catamaran",
493
+ "CD player",
494
+ "cello",
495
+ "cellular telephone",
496
+ "chain",
497
+ "chainlink fence",
498
+ "chain mail",
499
+ "chain saw",
500
+ "chest",
501
+ "chiffonier",
502
+ "chime",
503
+ "china cabinet",
504
+ "Christmas stocking",
505
+ "church",
506
+ "cinema",
507
+ "cleaver",
508
+ "cliff dwelling",
509
+ "cloak",
510
+ "clog",
511
+ "cocktail shaker",
512
+ "coffee mug",
513
+ "coffeepot",
514
+ "coil",
515
+ "combination lock",
516
+ "computer keyboard",
517
+ "confectionery",
518
+ "container ship",
519
+ "convertible",
520
+ "corkscrew",
521
+ "cornet",
522
+ "cowboy boot",
523
+ "cowboy hat",
524
+ "cradle",
525
+ "crane",
526
+ "crash helmet",
527
+ "crate",
528
+ "crib",
529
+ "Crock Pot",
530
+ "croquet ball",
531
+ "crutch",
532
+ "cuirass",
533
+ "dam",
534
+ "desk",
535
+ "desktop computer",
536
+ "dial telephone",
537
+ "diaper",
538
+ "digital clock",
539
+ "digital watch",
540
+ "dining table",
541
+ "dishrag",
542
+ "dishwasher",
543
+ "disk brake",
544
+ "dock",
545
+ "dogsled",
546
+ "dome",
547
+ "doormat",
548
+ "drilling platform",
549
+ "drum",
550
+ "drumstick",
551
+ "dumbbell",
552
+ "Dutch oven",
553
+ "electric fan",
554
+ "electric guitar",
555
+ "electric locomotive",
556
+ "entertainment center",
557
+ "envelope",
558
+ "espresso maker",
559
+ "face powder",
560
+ "feather boa",
561
+ "file",
562
+ "fireboat",
563
+ "fire engine",
564
+ "fire screen",
565
+ "flagpole",
566
+ "flute",
567
+ "folding chair",
568
+ "football helmet",
569
+ "forklift",
570
+ "fountain",
571
+ "fountain pen",
572
+ "four-poster",
573
+ "freight car",
574
+ "French horn",
575
+ "frying pan",
576
+ "fur coat",
577
+ "garbage truck",
578
+ "gasmask",
579
+ "gas pump",
580
+ "goblet",
581
+ "go-kart",
582
+ "golf ball",
583
+ "golfcart",
584
+ "gondola",
585
+ "gong",
586
+ "gown",
587
+ "grand piano",
588
+ "greenhouse",
589
+ "grille",
590
+ "grocery store",
591
+ "guillotine",
592
+ "hair slide",
593
+ "hair spray",
594
+ "half track",
595
+ "hammer",
596
+ "hamper",
597
+ "hand blower",
598
+ "hand-held computer",
599
+ "handkerchief",
600
+ "hard disc",
601
+ "harmonica",
602
+ "harp",
603
+ "harvester",
604
+ "hatchet",
605
+ "holster",
606
+ "home theater",
607
+ "honeycomb",
608
+ "hook",
609
+ "hoopskirt",
610
+ "horizontal bar",
611
+ "horse cart",
612
+ "hourglass",
613
+ "iPod",
614
+ "iron",
615
+ "jack-o'-lantern",
616
+ "jean",
617
+ "jeep",
618
+ "jersey",
619
+ "jigsaw puzzle",
620
+ "jinrikisha",
621
+ "joystick",
622
+ "kimono",
623
+ "knee pad",
624
+ "knot",
625
+ "lab coat",
626
+ "ladle",
627
+ "lampshade",
628
+ "laptop",
629
+ "lawn mower",
630
+ "lens cap",
631
+ "letter opener",
632
+ "library",
633
+ "lifeboat",
634
+ "lighter",
635
+ "limousine",
636
+ "liner",
637
+ "lipstick",
638
+ "Loafer",
639
+ "lotion",
640
+ "loudspeaker",
641
+ "loupe",
642
+ "lumbermill",
643
+ "magnetic compass",
644
+ "mailbag",
645
+ "mailbox",
646
+ "maillot",
647
+ "maillot tank suit",
648
+ "manhole cover",
649
+ "maraca",
650
+ "marimba",
651
+ "mask",
652
+ "matchstick",
653
+ "maypole",
654
+ "maze",
655
+ "measuring cup",
656
+ "medicine chest",
657
+ "megalith",
658
+ "microphone",
659
+ "microwave",
660
+ "military uniform",
661
+ "milk can",
662
+ "minibus",
663
+ "miniskirt",
664
+ "minivan",
665
+ "missile",
666
+ "mitten",
667
+ "mixing bowl",
668
+ "mobile home",
669
+ "Model T",
670
+ "modem",
671
+ "monastery",
672
+ "monitor",
673
+ "moped",
674
+ "mortar",
675
+ "mortarboard",
676
+ "mosque",
677
+ "mosquito net",
678
+ "motor scooter",
679
+ "mountain bike",
680
+ "mountain tent",
681
+ "mouse",
682
+ "mousetrap",
683
+ "moving van",
684
+ "muzzle",
685
+ "nail",
686
+ "neck brace",
687
+ "necklace",
688
+ "nipple",
689
+ "notebook",
690
+ "obelisk",
691
+ "oboe",
692
+ "ocarina",
693
+ "odometer",
694
+ "oil filter",
695
+ "organ",
696
+ "oscilloscope",
697
+ "overskirt",
698
+ "oxcart",
699
+ "oxygen mask",
700
+ "packet",
701
+ "paddle",
702
+ "paddlewheel",
703
+ "padlock",
704
+ "paintbrush",
705
+ "pajama",
706
+ "palace",
707
+ "panpipe",
708
+ "paper towel",
709
+ "parachute",
710
+ "parallel bars",
711
+ "park bench",
712
+ "parking meter",
713
+ "passenger car",
714
+ "patio",
715
+ "pay-phone",
716
+ "pedestal",
717
+ "pencil box",
718
+ "pencil sharpener",
719
+ "perfume",
720
+ "Petri dish",
721
+ "photocopier",
722
+ "pick",
723
+ "pickelhaube",
724
+ "picket fence",
725
+ "pickup",
726
+ "pier",
727
+ "piggy bank",
728
+ "pill bottle",
729
+ "pillow",
730
+ "ping-pong ball",
731
+ "pinwheel",
732
+ "pirate",
733
+ "pitcher",
734
+ "plane",
735
+ "planetarium",
736
+ "plastic bag",
737
+ "plate rack",
738
+ "plow",
739
+ "plunger",
740
+ "Polaroid camera",
741
+ "pole",
742
+ "police van",
743
+ "poncho",
744
+ "pool table",
745
+ "pop bottle",
746
+ "pot",
747
+ "potter's wheel",
748
+ "power drill",
749
+ "prayer rug",
750
+ "printer",
751
+ "prison",
752
+ "projectile",
753
+ "projector",
754
+ "puck",
755
+ "punching bag",
756
+ "purse",
757
+ "quill",
758
+ "quilt",
759
+ "racer",
760
+ "racket",
761
+ "radiator",
762
+ "radio",
763
+ "radio telescope",
764
+ "rain barrel",
765
+ "recreational vehicle",
766
+ "reel",
767
+ "reflex camera",
768
+ "refrigerator",
769
+ "remote control",
770
+ "restaurant",
771
+ "revolver",
772
+ "rifle",
773
+ "rocking chair",
774
+ "rotisserie",
775
+ "rubber eraser",
776
+ "rugby ball",
777
+ "rule",
778
+ "running shoe",
779
+ "safe",
780
+ "safety pin",
781
+ "saltshaker",
782
+ "sandal",
783
+ "sarong",
784
+ "sax",
785
+ "scabbard",
786
+ "scale",
787
+ "school bus",
788
+ "schooner",
789
+ "scoreboard",
790
+ "screen",
791
+ "screw",
792
+ "screwdriver",
793
+ "seat belt",
794
+ "sewing machine",
795
+ "shield",
796
+ "shoe shop",
797
+ "shoji",
798
+ "shopping basket",
799
+ "shopping cart",
800
+ "shovel",
801
+ "shower cap",
802
+ "shower curtain",
803
+ "ski",
804
+ "ski mask",
805
+ "sleeping bag",
806
+ "slide rule",
807
+ "sliding door",
808
+ "slot",
809
+ "snorkel",
810
+ "snowmobile",
811
+ "snowplow",
812
+ "soap dispenser",
813
+ "soccer ball",
814
+ "sock",
815
+ "solar dish",
816
+ "sombrero",
817
+ "soup bowl",
818
+ "space bar",
819
+ "space heater",
820
+ "space shuttle",
821
+ "spatula",
822
+ "speedboat",
823
+ "spider web",
824
+ "spindle",
825
+ "sports car",
826
+ "spotlight",
827
+ "stage",
828
+ "steam locomotive",
829
+ "steel arch bridge",
830
+ "steel drum",
831
+ "stethoscope",
832
+ "stole",
833
+ "stone wall",
834
+ "stopwatch",
835
+ "stove",
836
+ "strainer",
837
+ "streetcar",
838
+ "stretcher",
839
+ "studio couch",
840
+ "stupa",
841
+ "submarine",
842
+ "suit",
843
+ "sundial",
844
+ "sunglass",
845
+ "sunglasses",
846
+ "sunscreen",
847
+ "suspension bridge",
848
+ "swab",
849
+ "sweatshirt",
850
+ "swimming trunks",
851
+ "swing",
852
+ "switch",
853
+ "syringe",
854
+ "table lamp",
855
+ "tank",
856
+ "tape player",
857
+ "teapot",
858
+ "teddy",
859
+ "television",
860
+ "tennis ball",
861
+ "thatch",
862
+ "theater curtain",
863
+ "thimble",
864
+ "thresher",
865
+ "throne",
866
+ "tile roof",
867
+ "toaster",
868
+ "tobacco shop",
869
+ "toilet seat",
870
+ "torch",
871
+ "totem pole",
872
+ "tow truck",
873
+ "toyshop",
874
+ "tractor",
875
+ "trailer truck",
876
+ "tray",
877
+ "trench coat",
878
+ "tricycle",
879
+ "trimaran",
880
+ "tripod",
881
+ "triumphal arch",
882
+ "trolleybus",
883
+ "trombone",
884
+ "tub",
885
+ "turnstile",
886
+ "typewriter keyboard",
887
+ "umbrella",
888
+ "unicycle",
889
+ "upright",
890
+ "vacuum",
891
+ "vase",
892
+ "vault",
893
+ "velvet",
894
+ "vending machine",
895
+ "vestment",
896
+ "viaduct",
897
+ "violin",
898
+ "volleyball",
899
+ "waffle iron",
900
+ "wall clock",
901
+ "wallet",
902
+ "wardrobe",
903
+ "warplane",
904
+ "washbasin",
905
+ "washer",
906
+ "water bottle",
907
+ "water jug",
908
+ "water tower",
909
+ "whiskey jug",
910
+ "whistle",
911
+ "wig",
912
+ "window screen",
913
+ "window shade",
914
+ "Windsor tie",
915
+ "wine bottle",
916
+ "wing",
917
+ "wok",
918
+ "wooden spoon",
919
+ "wool",
920
+ "worm fence",
921
+ "wreck",
922
+ "yawl",
923
+ "yurt",
924
+ "web site",
925
+ "comic book",
926
+ "crossword puzzle",
927
+ "street sign",
928
+ "traffic light",
929
+ "book jacket",
930
+ "menu",
931
+ "plate",
932
+ "guacamole",
933
+ "consomme",
934
+ "hot pot",
935
+ "trifle",
936
+ "ice cream",
937
+ "ice lolly",
938
+ "French loaf",
939
+ "bagel",
940
+ "pretzel",
941
+ "cheeseburger",
942
+ "hotdog",
943
+ "mashed potato",
944
+ "head cabbage",
945
+ "broccoli",
946
+ "cauliflower",
947
+ "zucchini",
948
+ "spaghetti squash",
949
+ "acorn squash",
950
+ "butternut squash",
951
+ "cucumber",
952
+ "artichoke",
953
+ "bell pepper",
954
+ "cardoon",
955
+ "mushroom",
956
+ "Granny Smith",
957
+ "strawberry",
958
+ "orange",
959
+ "lemon",
960
+ "fig",
961
+ "pineapple",
962
+ "banana",
963
+ "jackfruit",
964
+ "custard apple",
965
+ "pomegranate",
966
+ "hay",
967
+ "carbonara",
968
+ "chocolate sauce",
969
+ "dough",
970
+ "meat loaf",
971
+ "pizza",
972
+ "potpie",
973
+ "burrito",
974
+ "red wine",
975
+ "espresso",
976
+ "cup",
977
+ "eggnog",
978
+ "alp",
979
+ "bubble",
980
+ "cliff",
981
+ "coral reef",
982
+ "geyser",
983
+ "lakeside",
984
+ "promontory",
985
+ "sandbar",
986
+ "seashore",
987
+ "valley",
988
+ "volcano",
989
+ "ballplayer",
990
+ "groom",
991
+ "scuba diver",
992
+ "rapeseed",
993
+ "daisy",
994
+ "yellow lady's slipper",
995
+ "corn",
996
+ "acorn",
997
+ "hip",
998
+ "buckeye",
999
+ "coral fungus",
1000
+ "agaric",
1001
+ "gyromitra",
1002
+ "stinkhorn",
1003
+ "earthstar",
1004
+ "hen-of-the-woods",
1005
+ "bolete",
1006
+ "ear",
1007
+ "toilet tissue",
1008
+ ]
1009
+
1010
# To be replaced with torchvision.datasets.info("coco").categories
# 91 entries: "__background__", the 80 COCO detection classes, and 10 "N/A"
# placeholders padding out category ids that were removed from COCO.
_COCO_CATEGORIES = [
    "__background__", "person", "bicycle", "car", "motorcycle", "airplane",
    "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "N/A",
    "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse",
    "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "N/A", "backpack",
    "umbrella", "N/A", "N/A", "handbag", "tie", "suitcase", "frisbee", "skis",
    "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
    "skateboard", "surfboard", "tennis racket", "bottle", "N/A", "wine glass",
    "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
    "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
    "chair", "couch", "potted plant", "bed", "N/A", "dining table", "N/A",
    "N/A", "toilet", "N/A", "tv", "laptop", "mouse", "remote", "keyboard",
    "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
    "N/A", "book", "clock", "vase", "scissors", "teddy bear", "hair drier",
    "toothbrush",
]
1104
+
1105
# To be replaced with torchvision.datasets.info("coco_kp")
# Binary "is there a person" categories plus the 17 standard COCO person
# keypoints, listed in annotation order.
_COCO_PERSON_CATEGORIES = ["no person", "person"]
_COCO_PERSON_KEYPOINT_NAMES = [
    "nose",
    "left_eye", "right_eye",
    "left_ear", "right_ear",
    "left_shoulder", "right_shoulder",
    "left_elbow", "right_elbow",
    "left_wrist", "right_wrist",
    "left_hip", "right_hip",
    "left_knee", "right_knee",
    "left_ankle", "right_ankle",
]
1126
+
1127
# To be replaced with torchvision.datasets.info("voc").categories
# The 20 PASCAL VOC object classes preceded by "__background__".
_VOC_CATEGORIES = [
    "__background__", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor",
]
1151
+
1152
# To be replaced with torchvision.datasets.info("kinetics400").categories
# The 400 Kinetics-400 human-action class labels, in the dataset's
# canonical (alphabetical) order.
_KINETICS400_CATEGORIES = [
    "abseiling", "air drumming", "answering questions", "applauding",
    "applying cream", "archery", "arm wrestling", "arranging flowers",
    "assembling computer", "auctioning", "baby waking up", "baking cookies",
    "balloon blowing", "bandaging", "barbequing", "bartending",
    "beatboxing", "bee keeping", "belly dancing", "bench pressing",
    "bending back", "bending metal", "biking through snow", "blasting sand",
    "blowing glass", "blowing leaves", "blowing nose", "blowing out candles",
    "bobsledding", "bookbinding", "bouncing on trampoline", "bowling",
    "braiding hair", "breading or breadcrumbing", "breakdancing", "brush painting",
    "brushing hair", "brushing teeth", "building cabinet", "building shed",
    "bungee jumping", "busking", "canoeing or kayaking", "capoeira",
    "carrying baby", "cartwheeling", "carving pumpkin", "catching fish",
    "catching or throwing baseball", "catching or throwing frisbee", "catching or throwing softball", "celebrating",
    "changing oil", "changing wheel", "checking tires", "cheerleading",
    "chopping wood", "clapping", "clay pottery making", "clean and jerk",
    "cleaning floor", "cleaning gutters", "cleaning pool", "cleaning shoes",
    "cleaning toilet", "cleaning windows", "climbing a rope", "climbing ladder",
    "climbing tree", "contact juggling", "cooking chicken", "cooking egg",
    "cooking on campfire", "cooking sausages", "counting money", "country line dancing",
    "cracking neck", "crawling baby", "crossing river", "crying",
    "curling hair", "cutting nails", "cutting pineapple", "cutting watermelon",
    "dancing ballet", "dancing charleston", "dancing gangnam style", "dancing macarena",
    "deadlifting", "decorating the christmas tree", "digging", "dining",
    "disc golfing", "diving cliff", "dodgeball", "doing aerobics",
    "doing laundry", "doing nails", "drawing", "dribbling basketball",
    "drinking", "drinking beer", "drinking shots", "driving car",
    "driving tractor", "drop kicking", "drumming fingers", "dunking basketball",
    "dying hair", "eating burger", "eating cake", "eating carrots",
    "eating chips", "eating doughnuts", "eating hotdog", "eating ice cream",
    "eating spaghetti", "eating watermelon", "egg hunting", "exercising arm",
    "exercising with an exercise ball", "extinguishing fire", "faceplanting", "feeding birds",
    "feeding fish", "feeding goats", "filling eyebrows", "finger snapping",
    "fixing hair", "flipping pancake", "flying kite", "folding clothes",
    "folding napkins", "folding paper", "front raises", "frying vegetables",
    "garbage collecting", "gargling", "getting a haircut", "getting a tattoo",
    "giving or receiving award", "golf chipping", "golf driving", "golf putting",
    "grinding meat", "grooming dog", "grooming horse", "gymnastics tumbling",
    "hammer throw", "headbanging", "headbutting", "high jump",
    "high kick", "hitting baseball", "hockey stop", "holding snake",
    "hopscotch", "hoverboarding", "hugging", "hula hooping",
    "hurdling", "hurling (sport)", "ice climbing", "ice fishing",
    "ice skating", "ironing", "javelin throw", "jetskiing",
    "jogging", "juggling balls", "juggling fire", "juggling soccer ball",
    "jumping into pool", "jumpstyle dancing", "kicking field goal", "kicking soccer ball",
    "kissing", "kitesurfing", "knitting", "krumping",
    "laughing", "laying bricks", "long jump", "lunge",
    "making a cake", "making a sandwich", "making bed", "making jewelry",
    "making pizza", "making snowman", "making sushi", "making tea",
    "marching", "massaging back", "massaging feet", "massaging legs",
    "massaging person's head", "milking cow", "mopping floor", "motorcycling",
    "moving furniture", "mowing lawn", "news anchoring", "opening bottle",
    "opening present", "paragliding", "parasailing", "parkour",
    "passing American football (in game)", "passing American football (not in game)", "peeling apples", "peeling potatoes",
    "petting animal (not cat)", "petting cat", "picking fruit", "planting trees",
    "plastering", "playing accordion", "playing badminton", "playing bagpipes",
    "playing basketball", "playing bass guitar", "playing cards", "playing cello",
    "playing chess", "playing clarinet", "playing controller", "playing cricket",
    "playing cymbals", "playing didgeridoo", "playing drums", "playing flute",
    "playing guitar", "playing harmonica", "playing harp", "playing ice hockey",
    "playing keyboard", "playing kickball", "playing monopoly", "playing organ",
    "playing paintball", "playing piano", "playing poker", "playing recorder",
    "playing saxophone", "playing squash or racquetball", "playing tennis", "playing trombone",
    "playing trumpet", "playing ukulele", "playing violin", "playing volleyball",
    "playing xylophone", "pole vault", "presenting weather forecast", "pull ups",
    "pumping fist", "pumping gas", "punching bag", "punching person (boxing)",
    "push up", "pushing car", "pushing cart", "pushing wheelchair",
    "reading book", "reading newspaper", "recording music", "riding a bike",
    "riding camel", "riding elephant", "riding mechanical bull", "riding mountain bike",
    "riding mule", "riding or walking with horse", "riding scooter", "riding unicycle",
    "ripping paper", "robot dancing", "rock climbing", "rock scissors paper",
    "roller skating", "running on treadmill", "sailing", "salsa dancing",
    "sanding floor", "scrambling eggs", "scuba diving", "setting table",
    "shaking hands", "shaking head", "sharpening knives", "sharpening pencil",
    "shaving head", "shaving legs", "shearing sheep", "shining shoes",
    "shooting basketball", "shooting goal (soccer)", "shot put", "shoveling snow",
    "shredding paper", "shuffling cards", "side kick", "sign language interpreting",
    "singing", "situp", "skateboarding", "ski jumping",
    "skiing (not slalom or crosscountry)", "skiing crosscountry", "skiing slalom", "skipping rope",
    "skydiving", "slacklining", "slapping", "sled dog racing",
    "smoking", "smoking hookah", "snatch weight lifting", "sneezing",
    "sniffing", "snorkeling", "snowboarding", "snowkiting",
    "snowmobiling", "somersaulting", "spinning poi", "spray painting",
    "spraying", "springboard diving", "squat", "sticking tongue out",
    "stomping grapes", "stretching arm", "stretching leg", "strumming guitar",
    "surfing crowd", "surfing water", "sweeping floor", "swimming backstroke",
    "swimming breast stroke", "swimming butterfly stroke", "swing dancing", "swinging legs",
    "swinging on something", "sword fighting", "tai chi", "taking a shower",
    "tango dancing", "tap dancing", "tapping guitar", "tapping pen",
    "tasting beer", "tasting food", "testifying", "texting",
    "throwing axe", "throwing ball", "throwing discus", "tickling",
    "tobogganing", "tossing coin", "tossing salad", "training dog",
    "trapezing", "trimming or shaving beard", "trimming trees", "triple jump",
    "tying bow tie", "tying knot (not on a tie)", "tying tie", "unboxing",
    "unloading truck", "using computer", "using remote controller (not gaming)", "using segway",
    "vault", "waiting in line", "walking the dog", "washing dishes",
    "washing feet", "washing hair", "washing hands", "water skiing",
    "water sliding", "watering plants", "waxing back", "waxing chest",
    "waxing eyebrows", "waxing legs", "weaving basket", "welding",
    "whistling", "windsurfing", "wrapping present", "wrestling",
    "writing", "yawning", "yoga", "zumba",
]
pllava/lib/python3.10/site-packages/torchvision/models/feature_extraction.py ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import math
3
+ import re
4
+ import warnings
5
+ from collections import OrderedDict
6
+ from copy import deepcopy
7
+ from itertools import chain
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+ import torchvision
12
+ from torch import fx, nn
13
+ from torch.fx.graph_module import _copy_attr
14
+
15
+
16
+ __all__ = ["create_feature_extractor", "get_graph_node_names"]
17
+
18
+
19
class LeafModuleAwareTracer(fx.Tracer):
    """
    An fx.Tracer that lets the user specify a set of leaf modules, i.e.
    module classes that are not traced through. Calls to such modules show
    up in the traced graph as single nodes invoking the module's forward.
    """

    def __init__(self, *args, **kwargs):
        # User-specified classes treated as leaves; empty by default.
        self.leaf_modules = kwargs.pop("leaf_modules", {})
        super().__init__(*args, **kwargs)

    def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
        # Leaf either because the user listed its class, or because the base
        # tracer already treats it as a leaf (e.g. torch.nn builtins).
        return isinstance(m, tuple(self.leaf_modules)) or super().is_leaf_module(m, module_qualname)
37
+
38
+
39
class NodePathTracer(LeafModuleAwareTracer):
    """
    NodePathTracer is an FX tracer that, for each operation, also records the
    name of the Node from which the operation originated. A node name here is
    a `.` separated path walking the hierarchy from top level module down to
    leaf operation or leaf module. The name of the top level module is not
    included as part of the node name. For example, if we trace a module whose
    forward method applies a ReLU module, the name for that node will simply
    be 'relu'.

    Some notes on the specifics:
        - Nodes are recorded to `self.node_to_qualname` which is a dictionary
          mapping a given Node object to its node name.
        - Nodes are recorded in the order which they are executed during
          tracing.
        - When a duplicate node name is encountered, a suffix of the form
          _{int} is added. The counter starts from 1.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track the qualified name of the Node being traced
        self.current_module_qualname = ""
        # A map from FX Node to the qualified name
        # NOTE: This is loosely like the "qualified name" mentioned in the
        # torch.fx docs https://pytorch.org/docs/stable/fx.html but adapted
        # for the purposes of the torchvision feature extractor
        self.node_to_qualname = OrderedDict()

    def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
        """
        Override of `fx.Tracer.call_module`
        This override:
        1) Stores away the qualified name of the caller for restoration later
        2) Adds the qualified name of the caller to
           `current_module_qualname` for retrieval by `create_proxy`
        3) Once a leaf module is reached, calls `create_proxy`
        4) Restores the caller's qualified name into current_module_qualname
        """
        old_qualname = self.current_module_qualname
        try:
            module_qualname = self.path_of_module(m)
            self.current_module_qualname = module_qualname
            if not self.is_leaf_module(m, module_qualname):
                # Not a leaf: recurse so the module's internal ops are each
                # traced (and named) individually.
                out = forward(*args, **kwargs)
                return out
            # Leaf module: the whole call becomes one graph node.
            return self.create_proxy("call_module", module_qualname, args, kwargs)
        finally:
            # Restore the caller's qualified name even if tracing raised.
            self.current_module_qualname = old_qualname

    def create_proxy(
        self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
    ) -> fx.proxy.Proxy:
        """
        Override of `Tracer.create_proxy`. This override intercepts the recording
        of every operation and stores away the current traced module's qualified
        name in `node_to_qualname`
        """
        proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
        self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
        return proxy

    def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
        # Build the human-readable qualified name for `node`, deduplicating
        # repeats with a locally-scoped _{int} suffix.
        node_qualname = module_qualname

        if node.op != "call_module":
            # In this case module_qualname from torch.fx doesn't go all the
            # way to the leaf function/op, so we need to append it
            if len(node_qualname) > 0:
                # Only append '.' if we are deeper than the top level module
                node_qualname += "."
            node_qualname += str(node)

        # Now we need to add an _{index} postfix on any repeated node names
        # For modules we do this from scratch
        # But for anything else, torch.fx already has a globally scoped
        # _{index} postfix. But we want it locally (relative to direct parent)
        # scoped. So first we need to undo the torch.fx postfix
        if re.match(r".+_[0-9]+$", node_qualname) is not None:
            node_qualname = node_qualname.rsplit("_", 1)[0]

        # ... and now we add on our own postfix
        # Scan most-recent-first: the latest matching entry carries the
        # highest index used so far for this name.
        for existing_qualname in reversed(self.node_to_qualname.values()):
            # Check to see if existing_qualname is of the form
            # {node_qualname} or {node_qualname}_{int}
            if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
                postfix = existing_qualname.replace(node_qualname, "")
                if len(postfix):
                    # existing_qualname is of the form {node_qualname}_{int}
                    next_index = int(postfix[1:]) + 1
                else:
                    # existing_qualname is of the form {node_qualname}
                    next_index = 1
                node_qualname += f"_{next_index}"
                break

        return node_qualname
136
+
137
+
138
def _is_subseq(x, y):
    """Return True if ``y`` is a subsequence of ``x``.

    Every element of ``y`` must occur in ``x`` in the same relative order
    (https://stackoverflow.com/a/24017747/4391249).
    """
    remaining = iter(x)
    for wanted in y:
        # Advance through `remaining` until we find `wanted`; the iterator is
        # shared across iterations, which enforces the ordering constraint.
        for candidate in remaining:
            if candidate == wanted:
                break
        else:
            # `remaining` exhausted without finding `wanted`.
            return False
    return True
144
+
145
+
146
def _warn_graph_differences(train_tracer: "NodePathTracer", eval_tracer: "NodePathTracer"):
    """
    Warn the user when the node names obtained by tracing the model in train
    mode differ from those obtained in eval mode.
    """
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())

    # Identical traces: nothing to report.
    if train_nodes == eval_nodes:
        return

    suggestion_msg = (
        "When choosing nodes for feature extraction, you may need to specify "
        "output nodes for train and eval mode separately."
    )

    # Pick the most informative message: one trace may be a strict
    # subsequence of the other (e.g. dropout removed in eval mode).
    if _is_subseq(train_nodes, eval_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in eval mode "
            "are a subsequence of those obtained in train mode. "
        )
    elif _is_subseq(eval_nodes, train_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in train mode "
            "are a subsequence of those obtained in eval mode. "
        )
    else:
        msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
    warnings.warn(msg + suggestion_msg)
175
+
176
+
177
def _get_leaf_modules_for_ops() -> List[type]:
    """Return every class in ``torchvision.ops`` that subclasses ``nn.Module``.

    These are treated as leaf modules (not traced through) by default.
    """
    return [
        member
        for _, member in inspect.getmembers(torchvision.ops)
        if inspect.isclass(member) and issubclass(member, torch.nn.Module)
    ]
184
+
185
+
186
def _set_default_tracer_kwargs(original_tr_kwargs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge user-supplied tracer kwargs with torchvision's defaults.

    The defaults wrap ``math`` and ``torchvision.ops`` as autowrap modules and
    treat all ``torchvision.ops`` modules as leaves; user-provided values for
    ``"autowrap_modules"`` / ``"leaf_modules"`` are unioned with these defaults.

    Args:
        original_tr_kwargs (dict, optional): user-provided keyword arguments
            for ``NodePathTracer`` / ``torch.fx.Tracer``, or ``None``.

    Returns:
        dict: a new dict containing the merged tracer kwargs.
    """
    default_autowrap_modules = (math, torchvision.ops)
    default_leaf_modules = _get_leaf_modules_for_ops()
    # Work on a shallow copy so the caller's dict is not mutated in place
    # (the original implementation wrote the merged keys back into the
    # user-provided dict as a side effect).
    result_tracer_kwargs = {} if original_tr_kwargs is None else dict(original_tr_kwargs)
    result_tracer_kwargs["autowrap_modules"] = (
        tuple(set(result_tracer_kwargs["autowrap_modules"] + default_autowrap_modules))
        if "autowrap_modules" in result_tracer_kwargs
        else default_autowrap_modules
    )
    result_tracer_kwargs["leaf_modules"] = (
        list(set(result_tracer_kwargs["leaf_modules"] + default_leaf_modules))
        if "leaf_modules" in result_tracer_kwargs
        else default_leaf_modules
    )
    return result_tracer_kwargs
201
+
202
+
203
def get_graph_node_names(
    model: nn.Module,
    tracer_kwargs: Optional[Dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[Dict[str, Any]] = None,
) -> Tuple[List[str], List[str]]:
    """
    Dev utility to return node names in order of execution. See note on node
    names under :func:`create_feature_extractor`. Useful for seeing which node
    names are available for feature extraction. There are two reasons that
    node names can't easily be read directly from the code for a model:

    1. Not all submodules are traced through. Modules from ``torch.nn`` all
       fall within this category.
    2. Nodes representing the repeated application of the same operation
       or leaf module get a ``_{counter}`` postfix.

    The model is traced twice: once in train mode, and once in eval mode. Both
    sets of node names are returned.

    For more details on the node naming conventions used here, please see the
    :ref:`relevant subheading <about-node-names>` in the
    `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Args:
        model (nn.Module): model for which we'd like to print node names
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (they are eventually passed onto
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Returns:
        tuple(list, list): a list of node names from tracing the model in
        train mode, and another from tracing the model in eval mode.

    Examples::

        >>> model = torchvision.models.resnet18()
        >>> train_nodes, eval_nodes = get_graph_node_names(model)
    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    # Remember the caller's mode: the two traces below flip the model between
    # train() and eval(), and we restore the original state before returning.
    is_training = model.training
    train_tracer = NodePathTracer(**tracer_kwargs)
    train_tracer.trace(model.train(), concrete_args=concrete_args)
    eval_tracer = NodePathTracer(**tracer_kwargs)
    eval_tracer.trace(model.eval(), concrete_args=concrete_args)
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())
    if not suppress_diff_warning:
        _warn_graph_differences(train_tracer, eval_tracer)
    # Restore training state
    model.train(is_training)
    return train_nodes, eval_nodes
266
+
267
+
268
class DualGraphModule(fx.GraphModule):
    """
    A derivative of `fx.GraphModule`. Differs in the following ways:
    - Requires a train and eval version of the underlying graph
    - Copies submodules according to the nodes of both train and eval graphs.
    - Calling train(mode) switches between train graph and eval graph.
    """

    def __init__(
        self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
    ):
        """
        Args:
            root (nn.Module): module from which the copied module hierarchy is
                built
            train_graph (fx.Graph): the graph that should be used in train mode
            eval_graph (fx.Graph): the graph that should be used in eval mode
        """
        # NOTE: deliberately skip fx.GraphModule.__init__ (it would copy
        # submodules for a single graph); we do the copying ourselves below
        # for the union of both graphs.
        super(fx.GraphModule, self).__init__()

        self.__class__.__name__ = class_name

        self.train_graph = train_graph
        self.eval_graph = eval_graph

        # Copy all get_attr and call_module ops (indicated by BOTH train and
        # eval graphs)
        for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
            if node.op in ["get_attr", "call_module"]:
                if not isinstance(node.target, str):
                    raise TypeError(f"node.target should be of type str instead of {type(node.target)}")
                _copy_attr(root, self, node.target)

        # train mode by default
        self.train()
        # Assigning to `self.graph` triggers fx's recompile of `forward`.
        self.graph = train_graph

        # (borrowed from fx.GraphModule):
        # Store the Tracer class responsible for creating a Graph separately as part of the
        # GraphModule state, except when the Tracer is defined in a local namespace.
        # Locally defined Tracers are not pickleable. This is needed because torch.package will
        # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
        # to re-create the Graph during deserialization.
        if self.eval_graph._tracer_cls != self.train_graph._tracer_cls:
            raise TypeError(
                f"Train mode and eval mode should use the same tracer class. Instead got {self.eval_graph._tracer_cls} for eval vs {self.train_graph._tracer_cls} for train"
            )
        self._tracer_cls = None
        if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
            self._tracer_cls = self.graph._tracer_cls

    def train(self, mode=True):
        """
        Swap out the graph depending on the selected training mode.
        NOTE this should be safe when calling model.eval() because that just
        calls this with mode == False.
        """
        # NOTE: Only set self.graph if the current graph is not the desired
        # one. This saves us from recompiling the graph where not necessary.
        if mode and not self.training:
            self.graph = self.train_graph
        elif not mode and self.training:
            self.graph = self.eval_graph
        return super().train(mode=mode)
332
+
333
+
334
def create_feature_extractor(
    model: nn.Module,
    return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    tracer_kwargs: Optional[Dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[Dict[str, Any]] = None,
) -> fx.GraphModule:
    """
    Creates a new graph module that returns intermediate nodes from a given
    model as dictionary with user specified keys as strings, and the requested
    outputs as values. This is achieved by re-writing the computation graph of
    the model via FX to return the desired nodes as outputs. All unused nodes
    are removed, together with their corresponding parameters.

    Desired output nodes must be specified as a ``.`` separated
    path walking the module hierarchy from top level module down to leaf
    operation or leaf module. For more details on the node naming conventions
    used here, please see the :ref:`relevant subheading <about-node-names>`
    in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Not all models will be FX traceable, although with some massaging they can
    be made to cooperate. Here's a (not exhaustive) list of tips:

        - If you don't need to trace through a particular, problematic
          sub-module, turn it into a "leaf module" by passing a list of
          ``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
          It will not be traced through, but rather, the resulting graph will
          hold a reference to that module's forward method.
        - Likewise, you may turn functions into leaf functions by passing a
          list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
          example below).
        - Some inbuilt Python functions can be problematic. For instance,
          ``int`` will raise an error during tracing. You may wrap them in your
          own function and then pass that in ``autowrap_functions`` as one of
          the ``tracer_kwargs``.

    For further information on FX see the
    `torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.

    Args:
        model (nn.Module): model on which we will extract the features
        return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
            containing the names (or partial names - see note above)
            of the nodes for which the activations will be returned. If it is
            a ``Dict``, the keys are the node names, and the values
            are the user-specified keys for the graph module's returned
            dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
            node specification strings directly to output names. In the case
            that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
            this should not be specified.
        train_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``eval_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        eval_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``train_return_nodes`` must also be specified,
            and `return_nodes` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto it's parent class
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Examples::

        >>> # Feature extraction with resnet
        >>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> model = create_feature_extractor(
        >>>     model, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]

        >>> # Specifying leaf modules and leaf functions
        >>> def leaf_function(x):
        >>>     # This would raise a TypeError if traced through
        >>>     return int(x)
        >>>
        >>> class LeafModule(torch.nn.Module):
        >>>     def forward(self, x):
        >>>         # This would raise a TypeError if traced through
        >>>         int(x.shape[0])
        >>>         return torch.nn.functional.relu(x + 4)
        >>>
        >>> class MyModule(torch.nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.conv = torch.nn.Conv2d(3, 1, 3)
        >>>         self.leaf_module = LeafModule()
        >>>
        >>>     def forward(self, x):
        >>>         leaf_function(x.shape[0])
        >>>         x = self.conv(x)
        >>>         return self.leaf_module(x)
        >>>
        >>> model = create_feature_extractor(
        >>>     MyModule(), return_nodes=['leaf_module'],
        >>>     tracer_kwargs={'leaf_modules': [LeafModule],
        >>>                    'autowrap_functions': [leaf_function]})

    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    # Remember the caller's mode so it can be restored at the end: the loop
    # below flips the model between train() and eval() while tracing.
    is_training = model.training

    if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):

        raise ValueError(
            "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
        )

    if (train_return_nodes is None) ^ (eval_return_nodes is None):
        raise ValueError(
            "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
        )

    # At this point the only way for the XNOR below to hold is that both
    # `return_nodes` and the train/eval pair were supplied, which is ambiguous.
    if not ((return_nodes is None) ^ (train_return_nodes is None)):
        raise ValueError(
            "If `train_return_nodes` and `eval_return_nodes` are specified, then `return_nodes` should not be specified"
        )

    # Put *_return_nodes into Dict[str, str] format
    def to_strdict(n) -> Dict[str, str]:
        if isinstance(n, list):
            return {str(i): str(i) for i in n}
        return {str(k): str(v) for k, v in n.items()}

    if train_return_nodes is None:
        return_nodes = to_strdict(return_nodes)
        train_return_nodes = deepcopy(return_nodes)
        eval_return_nodes = deepcopy(return_nodes)
    else:
        train_return_nodes = to_strdict(train_return_nodes)
        eval_return_nodes = to_strdict(eval_return_nodes)

    # Repeat the tracing and graph rewriting for train and eval mode
    tracers = {}
    graphs = {}
    mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
    for mode in ["train", "eval"]:
        if mode == "train":
            model.train()
        elif mode == "eval":
            model.eval()

        # Instantiate our NodePathTracer and use that to trace the model
        tracer = NodePathTracer(**tracer_kwargs)
        graph = tracer.trace(model, concrete_args=concrete_args)

        name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
        graph_module = fx.GraphModule(tracer.root, graph, name)

        available_nodes = list(tracer.node_to_qualname.values())
        # FIXME We don't know if we should expect this to happen
        if len(set(available_nodes)) != len(available_nodes):
            raise ValueError(
                "There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
            )
        # Check that all outputs in return_nodes are present in the model
        for query in mode_return_nodes[mode].keys():
            # To check if a query is available we need to check that at least
            # one of the available names starts with it up to a .
            if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
                raise ValueError(
                    f"node: '{query}' is not present in model. Hint: use "
                    "`get_graph_node_names` to make sure the "
                    "`return_nodes` you specified are present. It may even "
                    "be that you need to specify `train_return_nodes` and "
                    "`eval_return_nodes` separately."
                )

        # Remove existing output nodes (train mode)
        orig_output_nodes = []
        for n in reversed(graph_module.graph.nodes):
            if n.op == "output":
                orig_output_nodes.append(n)
        if not orig_output_nodes:
            raise ValueError("No output nodes found in graph_module.graph.nodes")

        for n in orig_output_nodes:
            graph_module.graph.erase_node(n)

        # Find nodes corresponding to return_nodes and make them into output_nodes
        nodes = [n for n in graph_module.graph.nodes]
        output_nodes = OrderedDict()
        for n in reversed(nodes):
            module_qualname = tracer.node_to_qualname.get(n)
            if module_qualname is None:
                # NOTE - Know cases where this happens:
                # - Node representing creation of a tensor constant - probably
                #   not interesting as a return node
                # - When packing outputs into a named tuple like in InceptionV3
                continue
            for query in mode_return_nodes[mode]:
                depth = query.count(".")
                if ".".join(module_qualname.split(".")[: depth + 1]) == query:
                    output_nodes[mode_return_nodes[mode][query]] = n
                    mode_return_nodes[mode].pop(query)
                    break
        # Reversed iteration above collected the outputs back-to-front;
        # flip them so the returned dict follows execution order.
        output_nodes = OrderedDict(reversed(list(output_nodes.items())))

        # And add them in the end of the graph
        with graph_module.graph.inserting_after(nodes[-1]):
            graph_module.graph.output(output_nodes)

        # Remove unused modules / parameters
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()

        # Keep track of the tracer and graph, so we can choose the main one
        tracers[mode] = tracer
        graphs[mode] = graph

    # Warn user if there are any discrepancies between the graphs of the
    # train and eval modes
    if not suppress_diff_warning:
        _warn_graph_differences(tracers["train"], tracers["eval"])

    # Build the final graph module
    graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)

    # Restore original training mode
    model.train(is_training)
    graph_module.train(is_training)

    return graph_module
pllava/lib/python3.10/site-packages/torchvision/models/inception.py ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from collections import namedtuple
3
+ from functools import partial
4
+ from typing import Any, Callable, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch import nn, Tensor
9
+
10
+ from ..transforms._presets import ImageClassification
11
+ from ..utils import _log_api_usage_once
12
+ from ._api import register_model, Weights, WeightsEnum
13
+ from ._meta import _IMAGENET_CATEGORIES
14
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
15
+
16
+
17
__all__ = ["Inception3", "InceptionOutputs", "_InceptionOutputs", "Inception_V3_Weights", "inception_v3"]


# Return type of Inception3.forward in training mode: the main classifier
# logits plus the auxiliary-classifier logits (None when the aux head is
# disabled).
InceptionOutputs = namedtuple("InceptionOutputs", ["logits", "aux_logits"])
InceptionOutputs.__annotations__ = {"logits": Tensor, "aux_logits": Optional[Tensor]}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _InceptionOutputs set here for backwards compat
_InceptionOutputs = InceptionOutputs
26
+
27
+
28
class Inception3(nn.Module):
    """Inception v3 backbone and classifier.

    Expects ``N x 3 x 299 x 299`` inputs (see the per-stage shape comments in
    ``_forward``). In training mode with ``aux_logits=True``, ``forward``
    returns an :class:`InceptionOutputs` named tuple of (logits, aux_logits);
    otherwise it returns the main logits tensor only.

    Args:
        num_classes: size of the final classification layer.
        aux_logits: whether to build the auxiliary classifier head.
        transform_input: whether to re-normalize inputs in ``_transform_input``.
        inception_blocks: optional list of 7 block constructors overriding
            [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD,
            InceptionE, InceptionAux].
        init_weights: whether to run truncated-normal/constant initialization;
            ``None`` currently means True plus a FutureWarning.
        dropout: dropout probability applied before the final fc layer.
    """

    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
        inception_blocks: Optional[List[Callable[..., nn.Module]]] = None,
        init_weights: Optional[bool] = None,
        dropout: float = 0.5,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if inception_blocks is None:
            inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
        if init_weights is None:
            warnings.warn(
                "The default weight initialization of inception_v3 will be changed in future releases of "
                "torchvision. If you wish to keep the old behavior (which leads to long initialization times"
                " due to scipy/scipy#11299), please set init_weights=True.",
                FutureWarning,
            )
            init_weights = True
        if len(inception_blocks) != 7:
            raise ValueError(f"length of inception_blocks should be 7 instead of {len(inception_blocks)}")
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        # The aux head taps the 768-channel Mixed_6e output; kept as an
        # Optional attribute so it can be removed after loading weights.
        self.AuxLogits: Optional[nn.Module] = None
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(2048, num_classes)
        if init_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    # Submodules may declare a per-layer `stddev` (see
                    # InceptionAux); fall back to 0.1 otherwise.
                    stddev = float(m.stddev) if hasattr(m, "stddev") else 0.1  # type: ignore
                    torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=stddev, a=-2, b=2)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def _transform_input(self, x: Tensor) -> Tensor:
        # Per-channel affine remap: if x was normalized with the ImageNet
        # mean/std (0.485/0.229 etc.), this converts it to (x - 0.5) / 0.5,
        # i.e. the [-1, 1] normalization the ported weights were trained with.
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
        # N x 3 x 299 x 299
        x = self.Conv2d_1a_3x3(x)
        # N x 32 x 149 x 149
        x = self.Conv2d_2a_3x3(x)
        # N x 32 x 147 x 147
        x = self.Conv2d_2b_3x3(x)
        # N x 64 x 147 x 147
        x = self.maxpool1(x)
        # N x 64 x 73 x 73
        x = self.Conv2d_3b_1x1(x)
        # N x 80 x 73 x 73
        x = self.Conv2d_4a_3x3(x)
        # N x 192 x 71 x 71
        x = self.maxpool2(x)
        # N x 192 x 35 x 35
        x = self.Mixed_5b(x)
        # N x 256 x 35 x 35
        x = self.Mixed_5c(x)
        # N x 288 x 35 x 35
        x = self.Mixed_5d(x)
        # N x 288 x 35 x 35
        x = self.Mixed_6a(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6b(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6c(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6d(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6e(x)
        # N x 768 x 17 x 17
        aux: Optional[Tensor] = None
        if self.AuxLogits is not None:
            # Aux classifier is only evaluated in training mode.
            if self.training:
                aux = self.AuxLogits(x)
        # N x 768 x 17 x 17
        x = self.Mixed_7a(x)
        # N x 1280 x 8 x 8
        x = self.Mixed_7b(x)
        # N x 2048 x 8 x 8
        x = self.Mixed_7c(x)
        # N x 2048 x 8 x 8
        # Adaptive average pooling
        x = self.avgpool(x)
        # N x 2048 x 1 x 1
        x = self.dropout(x)
        # N x 2048 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 2048
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux

    @torch.jit.unused
    def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs:
        # Eager mode: only return the named tuple when aux logits are live.
        if self.training and self.aux_logits:
            return InceptionOutputs(x, aux)
        else:
            return x  # type: ignore[return-value]

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x, aux = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            # TorchScript requires a single static return type, so scripted
            # models always get the named tuple (aux may be None).
            if not aux_defined:
                warnings.warn("Scripted Inception3 always returns Inception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)
+
175
+
176
class InceptionA(nn.Module):
    """Inception block with 1x1, 5x5, double-3x3 and pooled-1x1 branches.

    All branches preserve the spatial size; their outputs are concatenated
    along the channel dimension (64 + 64 + 96 + pool_features channels).
    """

    def __init__(
        self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d

        # Branch 1: plain 1x1 convolution.
        self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)

        # Branch 2: 1x1 reduction followed by a padded 5x5 convolution.
        self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)

        # Branch 3: 1x1 reduction followed by two padded 3x3 convolutions.
        self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)

        # Branch 4: 3x3 average pool then 1x1 projection.
        self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        out_pool = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return [out_1x1, out_5x5, out_3x3dbl, out_pool]

    def forward(self, x: Tensor) -> Tensor:
        # Concatenate all branch outputs along the channel axis.
        return torch.cat(self._forward(x), 1)
213
+
214
+
215
class InceptionB(nn.Module):
    """Grid-reduction Inception block: every branch halves the spatial size.

    Outputs 384 + 96 + in_channels channels (strided 3x3, strided double-3x3,
    and strided max-pool branches concatenated).
    """

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d

        # Branch 1: single strided 3x3 convolution.
        self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)

        # Branch 2: 1x1 reduction, padded 3x3, then strided 3x3.
        self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)

    def _forward(self, x: Tensor) -> List[Tensor]:
        out_3x3 = self.branch3x3(x)
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Branch 3: strided max pool keeps the input channel count.
        out_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        return [out_3x3, out_3x3dbl, out_pool]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
241
+
242
+
243
class InceptionC(nn.Module):
    """Inception block that factorizes 7x7 convolutions into 1x7/7x1 pairs.

    Used on the 17x17 grid (Mixed_6b-6e). All branches preserve spatial size
    and the concatenated output always has 192 * 4 = 768 channels;
    ``channels_7x7`` only controls the intermediate width of the 7x7 branches.
    """

    def __init__(
        self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        # Single factorized 7x7: 1x7 followed by 7x1 (padding keeps size).
        self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        # Double factorized 7x7: alternating 7x1 / 1x7 convolutions.
        self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = conv_block(in_channels, 192, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return torch.cat(outputs, 1)
287
+
288
+
289
class InceptionD(nn.Module):
    """Grid-reduction Inception block used before the 8x8 stages.

    Every branch halves the spatial size; the concatenated output has
    320 + 192 + in_channels channels.
    """

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d

        # Branch 1: 1x1 reduction followed by a strided 3x3 convolution.
        self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)

        # Branch 2: 1x1 reduction, factorized 7x7 (1x7 then 7x1), strided 3x3.
        self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)

    def _forward(self, x: Tensor) -> List[Tensor]:
        out_3x3 = self.branch3x3_2(self.branch3x3_1(x))

        out_7x7x3 = self.branch7x7x3_1(x)
        out_7x7x3 = self.branch7x7x3_2(out_7x7x3)
        out_7x7x3 = self.branch7x7x3_3(out_7x7x3)
        out_7x7x3 = self.branch7x7x3_4(out_7x7x3)

        # Branch 3: strided max pool keeps the input channel count.
        out_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        return [out_3x3, out_7x7x3, out_pool]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
318
+
319
+
320
class InceptionE(nn.Module):
    """Widest Inception block, used on the 8x8 grid (Mixed_7b/7c).

    The 3x3 branches are "expanded": each feeds a parallel 1x3 and 3x1
    convolution whose outputs are concatenated, so the block output always
    has 320 + 768 + 768 + 192 = 2048 channels.
    """

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
        # Parallel 1x3 / 3x1 split applied to the same 384-channel input.
        self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
        # Same parallel split after the 3x3 convolution.
        self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = conv_block(in_channels, 192, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        # Fan out to the 1x3 / 3x1 pair, then merge on channels (384+384).
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return torch.cat(outputs, 1)
365
+
366
+
367
class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to the 17x17 / 768-channel stage.

    The ``stddev`` attributes set below are not used by this module itself:
    they are read by ``Inception3.__init__``'s weight-initialization loop to
    give these layers a smaller truncated-normal std than the 0.1 default.
    """

    def __init__(
        self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.conv0 = conv_block(in_channels, 128, kernel_size=1)
        self.conv1 = conv_block(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01  # type: ignore[assignment]
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001  # type: ignore[assignment]

    def forward(self, x: Tensor) -> Tensor:
        # N x 768 x 17 x 17
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # N x 768 x 5 x 5
        x = self.conv0(x)
        # N x 128 x 5 x 5
        x = self.conv1(x)
        # N x 768 x 1 x 1
        # Adaptive average pooling
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # N x 768 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 768
        x = self.fc(x)
        # N x 1000
        return x
396
+
397
+
398
class BasicConv2d(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> in-place ReLU.

    Extra keyword arguments (kernel_size, stride, padding, ...) are forwarded
    verbatim to :class:`nn.Conv2d`. The convolution has no bias because the
    following BatchNorm supplies its own affine shift.
    """

    def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        return F.relu(self.bn(self.conv(x)), inplace=True)
408
+
409
+
410
class Inception_V3_Weights(WeightsEnum):
    """Pretrained weight enum for :func:`inception_v3`.

    ``IMAGENET1K_V1`` are the weights ported from the original paper's
    release; inference transforms resize to 342 then center-crop to 299.
    """

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#inception-v3",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.294,
                    "acc@5": 93.450,
                }
            },
            "_ops": 5.713,
            "_file_size": 103.903,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
431
+
432
+
433
@register_model()
@handle_legacy_interface(weights=("pretrained", Inception_V3_Weights.IMAGENET1K_V1))
def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3:
    """
    Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`_.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Args:
        weights (:class:`~torchvision.models.Inception_V3_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.Inception_V3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.Inception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
    """
    weights = Inception_V3_Weights.verify(weights)

    # Remember what the caller asked for: the checkpoint contains aux-head
    # weights, so aux_logits is forced on for loading and (if the caller
    # disabled it) the aux head is stripped again afterwards.
    original_aux_logits = kwargs.get("aux_logits", True)
    if weights is not None:
        if "transform_input" not in kwargs:
            # The ported weights expect the (x - 0.5) / 0.5 input remap.
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        # Skip the slow truncated-normal init; weights are loaded below.
        _ovewrite_named_param(kwargs, "init_weights", False)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = Inception3(**kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model
pllava/lib/python3.10/site-packages/torchvision/models/maxvit.py ADDED
@@ -0,0 +1,833 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from collections import OrderedDict
3
+ from functools import partial
4
+ from typing import Any, Callable, List, Optional, Sequence, Tuple
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from torch import nn, Tensor
10
+ from torchvision.models._api import register_model, Weights, WeightsEnum
11
+ from torchvision.models._meta import _IMAGENET_CATEGORIES
12
+ from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from torchvision.ops.misc import Conv2dNormActivation, SqueezeExcitation
14
+ from torchvision.ops.stochastic_depth import StochasticDepth
15
+ from torchvision.transforms._presets import ImageClassification, InterpolationMode
16
+ from torchvision.utils import _log_api_usage_once
17
+
18
+ __all__ = [
19
+ "MaxVit",
20
+ "MaxVit_T_Weights",
21
+ "maxvit_t",
22
+ ]
23
+
24
+
25
+ def _get_conv_output_shape(input_size: Tuple[int, int], kernel_size: int, stride: int, padding: int) -> Tuple[int, int]:
26
+ return (
27
+ (input_size[0] - kernel_size + 2 * padding) // stride + 1,
28
+ (input_size[1] - kernel_size + 2 * padding) // stride + 1,
29
+ )
30
+
31
+
32
def _make_block_input_shapes(input_size: Tuple[int, int], n_blocks: int) -> List[Tuple[int, int]]:
    """Util function to check that the input size is correct for a MaxVit configuration."""
    # The stem halves the resolution once (3x3, stride 2, pad 1) and each
    # block halves it again; record the grid size that every block sees.
    shape = _get_conv_output_shape(input_size, 3, 2, 1)
    shapes: List[Tuple[int, int]] = []
    for _ in range(n_blocks):
        shape = _get_conv_output_shape(shape, 3, 2, 1)
        shapes.append(shape)
    return shapes
40
+
41
+
42
+ def _get_relative_position_index(height: int, width: int) -> torch.Tensor:
43
+ coords = torch.stack(torch.meshgrid([torch.arange(height), torch.arange(width)]))
44
+ coords_flat = torch.flatten(coords, 1)
45
+ relative_coords = coords_flat[:, :, None] - coords_flat[:, None, :]
46
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
47
+ relative_coords[:, :, 0] += height - 1
48
+ relative_coords[:, :, 1] += width - 1
49
+ relative_coords[:, :, 0] *= 2 * width - 1
50
+ return relative_coords.sum(-1)
51
+
52
+
53
class MBConv(nn.Module):
    """MBConv: Mobile Inverted Residual Bottleneck.

    Pre-norm -> 1x1 expand -> 3x3 depthwise (carries the stride) ->
    squeeze-excitation -> 1x1 project, with a (possibly downsampled)
    residual connection and optional stochastic depth.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        stride (int): Stride of the depthwise convolution.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        p_stochastic_dropout (float): Probability of stochastic depth.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        expansion_ratio: float,
        squeeze_ratio: float,
        stride: int,
        activation_layer: Callable[..., nn.Module],
        norm_layer: Callable[..., nn.Module],
        p_stochastic_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        proj: Sequence[nn.Module]
        self.proj: nn.Module

        # The residual branch needs a projection whenever the main branch
        # changes the spatial size or the channel count.
        should_proj = stride != 1 or in_channels != out_channels
        if should_proj:
            proj = [nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=True)]
            if stride == 2:
                # Downsample the residual with average pooling before the 1x1 conv.
                proj = [nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)] + proj  # type: ignore
            self.proj = nn.Sequential(*proj)
        else:
            self.proj = nn.Identity()  # type: ignore

        # NOTE: expansion and squeeze widths are derived from out_channels,
        # not in_channels.
        mid_channels = int(out_channels * expansion_ratio)
        sqz_channels = int(out_channels * squeeze_ratio)

        if p_stochastic_dropout:
            self.stochastic_depth = StochasticDepth(p_stochastic_dropout, mode="row")  # type: ignore
        else:
            self.stochastic_depth = nn.Identity()  # type: ignore

        _layers = OrderedDict()
        _layers["pre_norm"] = norm_layer(in_channels)
        _layers["conv_a"] = Conv2dNormActivation(
            in_channels,
            mid_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            inplace=None,
        )
        # Depthwise conv (groups == channels); this is where the stride is applied.
        _layers["conv_b"] = Conv2dNormActivation(
            mid_channels,
            mid_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            groups=mid_channels,
            inplace=None,
        )
        _layers["squeeze_excitation"] = SqueezeExcitation(mid_channels, sqz_channels, activation=nn.SiLU)
        _layers["conv_c"] = nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, bias=True)

        self.layers = nn.Sequential(_layers)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H / stride, W / stride].
        """
        res = self.proj(x)
        # Stochastic depth drops the whole main branch per-sample during training.
        x = self.stochastic_depth(self.layers(x))
        return res + x
138
+
139
+
140
class RelativePositionalMultiHeadAttention(nn.Module):
    """Relative Positional Multi-Head Attention.

    Standard multi-head self-attention over the last two dims of a
    [B, G, P, D] tensor, with a learned relative-position bias added to the
    attention logits. Assumes the P positions form a square grid
    (``size = sqrt(max_seq_len)``).

    Args:
        feat_dim (int): Number of input features.
        head_dim (int): Number of features per head.
        max_seq_len (int): Maximum sequence length.
    """

    def __init__(
        self,
        feat_dim: int,
        head_dim: int,
        max_seq_len: int,
    ) -> None:
        super().__init__()

        if feat_dim % head_dim != 0:
            raise ValueError(f"feat_dim: {feat_dim} must be divisible by head_dim: {head_dim}")

        self.n_heads = feat_dim // head_dim
        self.head_dim = head_dim
        # Side length of the square attention window.
        self.size = int(math.sqrt(max_seq_len))
        self.max_seq_len = max_seq_len

        self.to_qkv = nn.Linear(feat_dim, self.n_heads * self.head_dim * 3)
        # NOTE: scale is derived from feat_dim, not head_dim.
        self.scale_factor = feat_dim**-0.5

        self.merge = nn.Linear(self.head_dim * self.n_heads, feat_dim)
        # One learnable bias per (relative offset, head) pair.
        self.relative_position_bias_table = nn.parameter.Parameter(
            torch.empty(((2 * self.size - 1) * (2 * self.size - 1), self.n_heads), dtype=torch.float32),
        )

        self.register_buffer("relative_position_index", _get_relative_position_index(self.size, self.size))
        # initialize with truncated normal the bias
        torch.nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)

    def get_relative_positional_bias(self) -> torch.Tensor:
        # Expand the compact offset table to a full [1, heads, P, P] bias.
        bias_index = self.relative_position_index.view(-1)  # type: ignore
        relative_bias = self.relative_position_bias_table[bias_index].view(self.max_seq_len, self.max_seq_len, -1)  # type: ignore
        relative_bias = relative_bias.permute(2, 0, 1).contiguous()
        return relative_bias.unsqueeze(0)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, G, P, D].
        Returns:
            Tensor: Output tensor with expected layout of [B, G, P, D].
        """
        B, G, P, D = x.shape
        H, DH = self.n_heads, self.head_dim

        qkv = self.to_qkv(x)
        q, k, v = torch.chunk(qkv, 3, dim=-1)

        # Split heads: [B, G, P, H*DH] -> [B, G, H, P, DH].
        q = q.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
        k = k.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)
        v = v.reshape(B, G, P, H, DH).permute(0, 1, 3, 2, 4)

        k = k * self.scale_factor
        dot_prod = torch.einsum("B G H I D, B G H J D -> B G H I J", q, k)
        pos_bias = self.get_relative_positional_bias()

        # Bias broadcasts over batch and groups.
        dot_prod = F.softmax(dot_prod + pos_bias, dim=-1)

        out = torch.einsum("B G H I J, B G H J D -> B G H I D", dot_prod, v)
        # Merge heads back: [B, G, H, P, DH] -> [B, G, P, D].
        out = out.permute(0, 1, 3, 2, 4).reshape(B, G, P, D)

        out = self.merge(out)
        return out
211
+
212
+
213
class SwapAxes(nn.Module):
    """Parameter-free layer that swaps two axes of its input tensor."""

    def __init__(self, a: int, b: int) -> None:
        super().__init__()
        self.a = a
        self.b = b

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.swapaxes(x, self.a, self.b)
224
+
225
+
226
class WindowPartition(nn.Module):
    """
    Partition the input tensor into non-overlapping windows.
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x: Tensor, p: int) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
            p (int): Number of partitions.
        Returns:
            Tensor: Output tensor with expected layout of [B, H/P, W/P, P*P, C].
        """
        batch, channels, height, width = x.shape
        windows_h, windows_w = height // p, width // p
        # Split H and W into (window, intra-window) pairs ...
        tiled = x.reshape(batch, channels, windows_h, p, windows_w, p)
        # ... move channels last ...
        tiled = tiled.permute(0, 2, 4, 3, 5, 1)
        # ... then collapse the window grid and the P*P pixels inside each window.
        return tiled.reshape(batch, windows_h * windows_w, p * p, channels)
250
+
251
+
252
class WindowDepartition(nn.Module):
    """
    Departition the input tensor of non-overlapping windows into a feature volume of layout [B, C, H, W].
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x: Tensor, p: int, h_partitions: int, w_partitions: int) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, (H/P * W/P), P*P, C].
            p (int): Number of partitions.
            h_partitions (int): Number of vertical partitions.
            w_partitions (int): Number of horizontal partitions.
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H, W].
        """
        batch, _, _, channels = x.shape
        # Recover the per-window layout: window grid x intra-window pixels.
        grid = x.reshape(batch, h_partitions, w_partitions, p, p, channels)
        # Move channels back to dim 1, interleaving window and pixel axes.
        grid = grid.permute(0, 5, 1, 3, 2, 4)
        # Stitch the windows back into the full H x W plane.
        return grid.reshape(batch, channels, h_partitions * p, w_partitions * p)
280
+
281
+
282
class PartitionAttentionLayer(nn.Module):
    """
    Layer for partitioning the input tensor into non-overlapping windows and applying attention to each window.

    Args:
        in_channels (int): Number of input channels.
        head_dim (int): Dimension of each attention head.
        partition_size (int): Size of the partitions.
        partition_type (str): Type of partitioning to use. Can be either "grid" or "window".
        grid_size (Tuple[int, int]): Size of the grid to partition the input tensor into.
        mlp_ratio (int): Ratio of the feature size expansion in the MLP layer.
        activation_layer (Callable[..., nn.Module]): Activation function to use.
        norm_layer (Callable[..., nn.Module]): Normalization function to use.
        attention_dropout (float): Dropout probability for the attention layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        p_stochastic_dropout (float): Probability of dropping out a partition.
    """

    def __init__(
        self,
        in_channels: int,
        head_dim: int,
        # partitioning parameters
        partition_size: int,
        partition_type: str,
        # grid size needs to be known at initialization time
        # because we need to know how many relative offsets there are in the grid
        grid_size: Tuple[int, int],
        mlp_ratio: int,
        activation_layer: Callable[..., nn.Module],
        norm_layer: Callable[..., nn.Module],
        attention_dropout: float,
        mlp_dropout: float,
        p_stochastic_dropout: float,
    ) -> None:
        super().__init__()

        self.n_heads = in_channels // head_dim
        self.head_dim = head_dim
        self.n_partitions = grid_size[0] // partition_size
        self.partition_type = partition_type
        self.grid_size = grid_size

        if partition_type not in ["grid", "window"]:
            raise ValueError("partition_type must be either 'grid' or 'window'")

        # "window" attends within local PxP tiles; "grid" attends across
        # dilated positions by swapping the partition roles.
        if partition_type == "window":
            self.p, self.g = partition_size, self.n_partitions
        else:
            self.p, self.g = self.n_partitions, partition_size

        self.partition_op = WindowPartition()
        self.departition_op = WindowDepartition()
        # For grid attention, swap the group and pixel axes so attention
        # runs across windows instead of within them.
        self.partition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
        self.departition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()

        self.attn_layer = nn.Sequential(
            norm_layer(in_channels),
            # it's always going to be partition_size ** 2 because
            # of the axis swap in the case of grid partitioning
            RelativePositionalMultiHeadAttention(in_channels, head_dim, partition_size**2),
            nn.Dropout(attention_dropout),
        )

        # pre-normalization similar to transformer layers
        self.mlp_layer = nn.Sequential(
            nn.LayerNorm(in_channels),
            nn.Linear(in_channels, in_channels * mlp_ratio),
            activation_layer(),
            nn.Linear(in_channels * mlp_ratio, in_channels),
            nn.Dropout(mlp_dropout),
        )

        # layer scale factors
        self.stochastic_dropout = StochasticDepth(p_stochastic_dropout, mode="row")

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H, W].
        """

        # Undefined behavior if H or W are not divisible by p
        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
        gh, gw = self.grid_size[0] // self.p, self.grid_size[1] // self.p
        torch._assert(
            self.grid_size[0] % self.p == 0 and self.grid_size[1] % self.p == 0,
            "Grid size must be divisible by partition size. Got grid size of {} and partition size of {}".format(
                self.grid_size, self.p
            ),
        )

        # Partition -> (optional grid swap) -> pre-norm attention and MLP with
        # residuals and stochastic depth -> undo swap -> departition.
        x = self.partition_op(x, self.p)
        x = self.partition_swap(x)
        x = x + self.stochastic_dropout(self.attn_layer(x))
        x = x + self.stochastic_dropout(self.mlp_layer(x))
        x = self.departition_swap(x)
        x = self.departition_op(x, self.p, gh, gw)

        return x
384
+
385
+
386
class MaxVitLayer(nn.Module):
    """
    MaxVit layer consisting of a MBConv layer followed by a PartitionAttentionLayer with `window` and a PartitionAttentionLayer with `grid`.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        stride (int): Stride of the depthwise convolution.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Ratio of the MLP layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        attention_dropout (float): Dropout probability for the attention layer.
        p_stochastic_dropout (float): Probability of stochastic depth.
        partition_size (int): Size of the partitions.
        grid_size (Tuple[int, int]): Size of the input feature grid.
    """

    def __init__(
        self,
        # conv parameters
        in_channels: int,
        out_channels: int,
        squeeze_ratio: float,
        expansion_ratio: float,
        stride: int,
        # conv + transformer parameters
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        # transformer parameters
        head_dim: int,
        mlp_ratio: int,
        mlp_dropout: float,
        attention_dropout: float,
        p_stochastic_dropout: float,
        # partitioning parameters
        partition_size: int,
        grid_size: Tuple[int, int],
    ) -> None:
        super().__init__()

        layers: OrderedDict = OrderedDict()

        # convolutional layer
        layers["MBconv"] = MBConv(
            in_channels=in_channels,
            out_channels=out_channels,
            expansion_ratio=expansion_ratio,
            squeeze_ratio=squeeze_ratio,
            stride=stride,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        # attention layers, block -> grid
        layers["window_attention"] = PartitionAttentionLayer(
            in_channels=out_channels,
            head_dim=head_dim,
            partition_size=partition_size,
            partition_type="window",
            grid_size=grid_size,
            mlp_ratio=mlp_ratio,
            activation_layer=activation_layer,
            norm_layer=nn.LayerNorm,
            attention_dropout=attention_dropout,
            mlp_dropout=mlp_dropout,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        layers["grid_attention"] = PartitionAttentionLayer(
            in_channels=out_channels,
            head_dim=head_dim,
            partition_size=partition_size,
            partition_type="grid",
            grid_size=grid_size,
            mlp_ratio=mlp_ratio,
            activation_layer=activation_layer,
            norm_layer=nn.LayerNorm,
            attention_dropout=attention_dropout,
            mlp_dropout=mlp_dropout,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        self.layers = nn.Sequential(layers)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor of shape (B, C, H, W).
        Returns:
            Tensor: Output tensor of shape (B, C, H, W).
        """
        x = self.layers(x)
        return x
481
+
482
+
483
class MaxVitBlock(nn.Module):
    """
    A MaxVit block consisting of `n_layers` MaxVit layers.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Ratio of the MLP layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        attention_dropout (float): Dropout probability for the attention layer.
        partition_size (int): Size of the partitions.
        input_grid_size (Tuple[int, int]): Size of the input feature grid.
        n_layers (int): Number of layers in the block.
        p_stochastic (List[float]): List of probabilities for stochastic depth for each layer.
    """

    def __init__(
        self,
        # conv parameters
        in_channels: int,
        out_channels: int,
        squeeze_ratio: float,
        expansion_ratio: float,
        # conv + transformer parameters
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        # transformer parameters
        head_dim: int,
        mlp_ratio: int,
        mlp_dropout: float,
        attention_dropout: float,
        # partitioning parameters
        partition_size: int,
        input_grid_size: Tuple[int, int],
        # number of layers
        n_layers: int,
        p_stochastic: List[float],
    ) -> None:
        super().__init__()
        if not len(p_stochastic) == n_layers:
            raise ValueError(f"p_stochastic must have length n_layers={n_layers}, got p_stochastic={p_stochastic}.")

        self.layers = nn.ModuleList()
        # account for the first stride of the first layer
        self.grid_size = _get_conv_output_shape(input_grid_size, kernel_size=3, stride=2, padding=1)

        for idx, p in enumerate(p_stochastic):
            # Only the first layer downsamples (stride 2) and changes channels.
            stride = 2 if idx == 0 else 1
            self.layers += [
                MaxVitLayer(
                    in_channels=in_channels if idx == 0 else out_channels,
                    out_channels=out_channels,
                    squeeze_ratio=squeeze_ratio,
                    expansion_ratio=expansion_ratio,
                    stride=stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                    head_dim=head_dim,
                    mlp_ratio=mlp_ratio,
                    mlp_dropout=mlp_dropout,
                    attention_dropout=attention_dropout,
                    partition_size=partition_size,
                    grid_size=self.grid_size,
                    p_stochastic_dropout=p,
                ),
            ]

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor of shape (B, C, H, W).
        Returns:
            Tensor: Output tensor of shape (B, C, H, W).
        """
        for layer in self.layers:
            x = layer(x)
        return x
566
+
567
+
568
class MaxVit(nn.Module):
    """
    Implements MaxVit Transformer from the `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_ paper.
    Args:
        input_size (Tuple[int, int]): Size of the input image.
        stem_channels (int): Number of channels in the stem.
        partition_size (int): Size of the partitions.
        block_channels (List[int]): Number of channels in each block.
        block_layers (List[int]): Number of layers in each block.
        stochastic_depth_prob (float): Probability of stochastic depth. Expands to a list of probabilities for each layer that scales linearly to the specified value.
        squeeze_ratio (float): Squeeze ratio in the SE Layer. Default: 0.25.
        expansion_ratio (float): Expansion ratio in the MBConv bottleneck. Default: 4.
        norm_layer (Callable[..., nn.Module]): Normalization function. Default: None (setting to None will produce a `BatchNorm2d(eps=1e-3, momentum=0.01)`).
        activation_layer (Callable[..., nn.Module]): Activation function Default: nn.GELU.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Expansion ratio of the MLP layer. Default: 4.
        mlp_dropout (float): Dropout probability for the MLP layer. Default: 0.0.
        attention_dropout (float): Dropout probability for the attention layer. Default: 0.0.
        num_classes (int): Number of classes. Default: 1000.
    """

    def __init__(
        self,
        # input size parameters
        input_size: Tuple[int, int],
        # stem and task parameters
        stem_channels: int,
        # partitioning parameters
        partition_size: int,
        # block parameters
        block_channels: List[int],
        block_layers: List[int],
        # attention head dimensions
        head_dim: int,
        stochastic_depth_prob: float,
        # conv + transformer parameters
        # norm_layer is applied only to the conv layers
        # activation_layer is applied both to conv and transformer layers
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer: Callable[..., nn.Module] = nn.GELU,
        # conv parameters
        squeeze_ratio: float = 0.25,
        expansion_ratio: float = 4,
        # transformer parameters
        mlp_ratio: int = 4,
        mlp_dropout: float = 0.0,
        attention_dropout: float = 0.0,
        # task parameters
        num_classes: int = 1000,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        input_channels = 3

        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1029-L1030
        # for the exact parameters used in batchnorm
        if norm_layer is None:
            norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01)

        # Make sure input size will be divisible by the partition size in all blocks
        # Undefined behavior if H or W are not divisible by p
        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
        block_input_sizes = _make_block_input_shapes(input_size, len(block_channels))
        for idx, block_input_size in enumerate(block_input_sizes):
            if block_input_size[0] % partition_size != 0 or block_input_size[1] % partition_size != 0:
                raise ValueError(
                    f"Input size {block_input_size} of block {idx} is not divisible by partition size {partition_size}. "
                    f"Consider changing the partition size or the input size.\n"
                    f"Current configuration yields the following block input sizes: {block_input_sizes}."
                )

        # stem
        self.stem = nn.Sequential(
            Conv2dNormActivation(
                input_channels,
                stem_channels,
                3,
                stride=2,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
                bias=False,
                inplace=None,
            ),
            Conv2dNormActivation(
                stem_channels, stem_channels, 3, stride=1, norm_layer=None, activation_layer=None, bias=True
            ),
        )

        # account for stem stride
        input_size = _get_conv_output_shape(input_size, kernel_size=3, stride=2, padding=1)
        self.partition_size = partition_size

        # blocks
        self.blocks = nn.ModuleList()
        in_channels = [stem_channels] + block_channels[:-1]
        out_channels = block_channels

        # precompute the stochastic depth probabilities from 0 to stochastic_depth_prob
        # since we have N blocks with L layers, we will have N * L probabilities uniformly distributed
        # over the range [0, stochastic_depth_prob]
        p_stochastic = np.linspace(0, stochastic_depth_prob, sum(block_layers)).tolist()

        p_idx = 0
        for in_channel, out_channel, num_layers in zip(in_channels, out_channels, block_layers):
            self.blocks.append(
                MaxVitBlock(
                    in_channels=in_channel,
                    out_channels=out_channel,
                    squeeze_ratio=squeeze_ratio,
                    expansion_ratio=expansion_ratio,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                    head_dim=head_dim,
                    mlp_ratio=mlp_ratio,
                    mlp_dropout=mlp_dropout,
                    attention_dropout=attention_dropout,
                    partition_size=partition_size,
                    input_grid_size=input_size,
                    n_layers=num_layers,
                    p_stochastic=p_stochastic[p_idx : p_idx + num_layers],
                ),
            )
            # Each block halves the resolution; feed the new grid to the next one.
            input_size = self.blocks[-1].grid_size
            p_idx += num_layers

        # see https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1137-L1158
        # for why there is Linear -> Tanh -> Linear
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.LayerNorm(block_channels[-1]),
            nn.Linear(block_channels[-1], block_channels[-1]),
            nn.Tanh(),
            nn.Linear(block_channels[-1], num_classes, bias=False),
        )

        self._init_weights()

    def forward(self, x: Tensor) -> Tensor:
        x = self.stem(x)
        for block in self.blocks:
            x = block(x)
        x = self.classifier(x)
        return x

    def _init_weights(self):
        # Truncated-free normal init for conv/linear weights; standard
        # (1, 0) init for batch-norm affine parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
727
+
728
+
729
def _maxvit(
    # stem parameters
    stem_channels: int,
    # block parameters
    block_channels: List[int],
    block_layers: List[int],
    stochastic_depth_prob: float,
    # partitioning parameters
    partition_size: int,
    # transformer parameters
    head_dim: int,
    # Weights API
    weights: Optional[WeightsEnum] = None,
    progress: bool = False,
    # kwargs,
    **kwargs: Any,
) -> MaxVit:
    """Shared builder: constructs a ``MaxVit`` and optionally loads weights.

    When ``weights`` is given, ``num_classes`` and ``input_size`` are forced
    to match the checkpoint's metadata before construction.
    """
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        # All released MaxVit checkpoints are trained on square inputs;
        # min_size doubles as the expected input size here.
        assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
        _ovewrite_named_param(kwargs, "input_size", weights.meta["min_size"])

    # input_size is popped (not forwarded via **kwargs) because MaxVit takes
    # it as an explicit keyword below.
    input_size = kwargs.pop("input_size", (224, 224))

    model = MaxVit(
        stem_channels=stem_channels,
        block_channels=block_channels,
        block_layers=block_layers,
        stochastic_depth_prob=stochastic_depth_prob,
        head_dim=head_dim,
        partition_size=partition_size,
        input_size=input_size,
        **kwargs,
    )

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
769
+
770
+
771
class MaxVit_T_Weights(WeightsEnum):
    # Pretrained checkpoint metadata for the MaxViT-T (tiny) variant.
    IMAGENET1K_V1 = Weights(
        # URL empty until official release
        url="https://download.pytorch.org/models/maxvit_t-bc5ab103.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            "categories": _IMAGENET_CATEGORIES,
            "num_params": 30919624,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#maxvit",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.700,
                    "acc@5": 96.722,
                }
            },
            "_ops": 5.558,
            "_file_size": 118.769,
            "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.
            They were trained with a BatchNorm2D momentum of 0.99 instead of the more correct 0.01.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
796
+
797
+
798
+ @register_model()
799
+ @handle_legacy_interface(weights=("pretrained", MaxVit_T_Weights.IMAGENET1K_V1))
800
+ def maxvit_t(*, weights: Optional[MaxVit_T_Weights] = None, progress: bool = True, **kwargs: Any) -> MaxVit:
801
+ """
802
+ Constructs a maxvit_t architecture from
803
+ `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_.
804
+
805
+ Args:
806
+ weights (:class:`~torchvision.models.MaxVit_T_Weights`, optional): The
807
+ pretrained weights to use. See
808
+ :class:`~torchvision.models.MaxVit_T_Weights` below for
809
+ more details, and possible values. By default, no pre-trained
810
+ weights are used.
811
+ progress (bool, optional): If True, displays a progress bar of the
812
+ download to stderr. Default is True.
813
+ **kwargs: parameters passed to the ``torchvision.models.maxvit.MaxVit``
814
+ base class. Please refer to the `source code
815
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/maxvit.py>`_
816
+ for more details about this class.
817
+
818
+ .. autoclass:: torchvision.models.MaxVit_T_Weights
819
+ :members:
820
+ """
821
+ weights = MaxVit_T_Weights.verify(weights)
822
+
823
+ return _maxvit(
824
+ stem_channels=64,
825
+ block_channels=[64, 128, 256, 512],
826
+ block_layers=[2, 2, 5, 2],
827
+ head_dim=32,
828
+ stochastic_depth_prob=0.2,
829
+ partition_size=7,
830
+ weights=weights,
831
+ progress=progress,
832
+ **kwargs,
833
+ )
pllava/lib/python3.10/site-packages/torchvision/models/mnasnet.py ADDED
@@ -0,0 +1,434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, Dict, List, Optional
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch import Tensor
8
+
9
+ from ..transforms._presets import ImageClassification
10
+ from ..utils import _log_api_usage_once
11
+ from ._api import register_model, Weights, WeightsEnum
12
+ from ._meta import _IMAGENET_CATEGORIES
13
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
14
+
15
+
16
# Public API of this module.
__all__ = [
    "MNASNet",
    "MNASNet0_5_Weights",
    "MNASNet0_75_Weights",
    "MNASNet1_0_Weights",
    "MNASNet1_3_Weights",
    "mnasnet0_5",
    "mnasnet0_75",
    "mnasnet1_0",
    "mnasnet1_3",
]


# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is
# 1.0 - tensorflow.
_BN_MOMENTUM = 1 - 0.9997
32
+
33
+
34
+ class _InvertedResidual(nn.Module):
35
+ def __init__(
36
+ self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
37
+ ) -> None:
38
+ super().__init__()
39
+ if stride not in [1, 2]:
40
+ raise ValueError(f"stride should be 1 or 2 instead of {stride}")
41
+ if kernel_size not in [3, 5]:
42
+ raise ValueError(f"kernel_size should be 3 or 5 instead of {kernel_size}")
43
+ mid_ch = in_ch * expansion_factor
44
+ self.apply_residual = in_ch == out_ch and stride == 1
45
+ self.layers = nn.Sequential(
46
+ # Pointwise
47
+ nn.Conv2d(in_ch, mid_ch, 1, bias=False),
48
+ nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
49
+ nn.ReLU(inplace=True),
50
+ # Depthwise
51
+ nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, stride=stride, groups=mid_ch, bias=False),
52
+ nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
53
+ nn.ReLU(inplace=True),
54
+ # Linear pointwise. Note that there's no activation.
55
+ nn.Conv2d(mid_ch, out_ch, 1, bias=False),
56
+ nn.BatchNorm2d(out_ch, momentum=bn_momentum),
57
+ )
58
+
59
+ def forward(self, input: Tensor) -> Tensor:
60
+ if self.apply_residual:
61
+ return self.layers(input) + input
62
+ else:
63
+ return self.layers(input)
64
+
65
+
66
def _stack(
    in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float
) -> nn.Sequential:
    """Create a stack of ``repeats`` inverted residual blocks.

    Only the first block may change resolution/channels (so it has no skip);
    the remaining blocks preserve shape and can use residual connections.
    """
    if repeats < 1:
        raise ValueError(f"repeats should be >= 1, instead got {repeats}")
    blocks = [_InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum)]
    blocks.extend(
        _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum)
        for _ in range(repeats - 1)
    )
    return nn.Sequential(*blocks)
78
+
79
+
80
+ def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
81
+ """Asymmetric rounding to make `val` divisible by `divisor`. With default
82
+ bias, will round up, unless the number is no more than 10% greater than the
83
+ smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88."""
84
+ if not 0.0 < round_up_bias < 1.0:
85
+ raise ValueError(f"round_up_bias should be greater than 0.0 and smaller than 1.0 instead of {round_up_bias}")
86
+ new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
87
+ return new_val if new_val >= round_up_bias * val else new_val + divisor
88
+
89
+
90
def _get_depths(alpha: float) -> List[int]:
    """Scale the per-stage channel counts by ``alpha``, rounding to multiples
    of 8 as in the reference MobileNet code (prefers rounding up over down)."""
    base_depths = (32, 16, 24, 40, 80, 96, 192, 320)
    return [_round_to_multiple_of(alpha * d, 8) for d in base_depths]
95
+
96
+
97
class MNASNet(torch.nn.Module):
    """MNASNet, as described in https://arxiv.org/abs/1807.11626. This
    implements the B1 variant of the model.
    >>> model = MNASNet(1.0, num_classes=1000)
    >>> x = torch.rand(1, 3, 224, 224)
    >>> y = model(x)
    >>> y.dim()
    2
    >>> y.nelement()
    1000
    """

    # Version 2 adds depth scaling in the initial stages of the network.
    _version = 2

    def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
        """
        Args:
            alpha: Depth (width) multiplier applied to every stage.
            num_classes: Size of the classifier output.
            dropout: Dropout probability before the final linear layer.
        """
        super().__init__()
        _log_api_usage_once(self)
        if alpha <= 0.0:
            raise ValueError(f"alpha should be greater than 0.0 instead of {alpha}")
        self.alpha = alpha
        self.num_classes = num_classes
        depths = _get_depths(alpha)
        layers = [
            # First layer: regular conv.
            nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            # Depthwise separable, no skip.
            nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, groups=depths[0], bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
            # MNASNet blocks: stacks of inverted residuals.
            _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
            _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
            _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
            _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
            # Final mapping to classifier input.
            nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
        ]
        self.layers = nn.Sequential(*layers)
        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), nn.Linear(1280, num_classes))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, mode="fan_out", nonlinearity="sigmoid")
                nn.init.zeros_(m.bias)

    def forward(self, x: Tensor) -> Tensor:
        x = self.layers(x)
        # Equivalent to global avgpool and removing H and W dimensions.
        x = x.mean([2, 3])
        return self.classifier(x)

    def _load_from_state_dict(
        self,
        state_dict: Dict,
        prefix: str,
        local_metadata: Dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ) -> None:
        # Supports loading v1 checkpoints: v1 used a fixed-size stem regardless
        # of alpha, so the stem layers are patched back before delegating.
        version = local_metadata.get("version", None)
        if version not in [1, 2]:
            # Fixed typo in the original message ("shluld" -> "should").
            raise ValueError(f"version should be set to 1 or 2 instead of {version}")

        if version == 1 and not self.alpha == 1.0:
            # In the initial version of the model (v1), stem was fixed-size.
            # All other layer configurations were the same. This will patch
            # the model so that it's identical to v1. Model with alpha 1.0 is
            # unaffected.
            depths = _get_depths(self.alpha)
            v1_stem = [
                nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
                nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
                _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            ]
            for idx, layer in enumerate(v1_stem):
                self.layers[idx] = layer

            # The model is now identical to v1, and must be saved as such.
            self._version = 1
            warnings.warn(
                "A new version of MNASNet model has been implemented. "
                "Your checkpoint was saved using the previous version. "
                "This checkpoint will load and work as before, but "
                "you may want to upgrade by training a newer model or "
                "transfer learning from an updated ImageNet checkpoint.",
                UserWarning,
            )

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )
212
+
213
+
214
# Metadata shared by every MNASNet weight entry below.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/1e100/mnasnet_trainer",
}
219
+
220
+
221
class MNASNet0_5_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for MNASNet with depth multiplier 0.5."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2218512,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 67.734,
                    "acc@5": 87.490,
                }
            },
            "_ops": 0.104,
            "_file_size": 8.591,
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
240
+
241
+
242
class MNASNet0_75_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for MNASNet with depth multiplier 0.75."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0_75-7090bc5f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            # Overrides the _COMMON_META recipe: these weights come from
            # TorchVision's own training run.
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 3170208,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.180,
                    "acc@5": 90.496,
                }
            },
            "_ops": 0.215,
            "_file_size": 12.303,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
265
+
266
+
267
class MNASNet1_0_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for MNASNet with depth multiplier 1.0."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 4383312,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.456,
                    "acc@5": 91.510,
                }
            },
            "_ops": 0.314,
            "_file_size": 16.915,
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
286
+
287
+
288
class MNASNet1_3_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for MNASNet with depth multiplier 1.3."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1_3-a4c69d6f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            # Overrides the _COMMON_META recipe: these weights come from
            # TorchVision's own training run.
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 6282256,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.506,
                    "acc@5": 93.522,
                }
            },
            "_ops": 0.526,
            "_file_size": 24.246,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
311
+
312
+
313
def _mnasnet(alpha: float, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MNASNet:
    """Build an MNASNet with the given depth multiplier, optionally loading weights.

    Args:
        alpha: Depth (width) multiplier applied to every stage.
        weights: Pretrained weights to load, or ``None`` for random init.
        progress: Whether to display a download progress bar.
        **kwargs: Forwarded to the ``MNASNet`` constructor.
    """
    if weights is not None:
        # A pretrained checkpoint fixes the classifier size to its dataset's.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MNASNet(alpha, **kwargs)

    # Explicit `is not None` instead of truthiness, consistent with the other
    # builder helpers in this package (e.g. _mobilenet_v3).
    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
323
+
324
+
325
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_5_Weights.IMAGENET1K_V1))
def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.5 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet0_5_Weights
        :members:
    """
    # Normalize/validate the weights argument, then delegate to the shared builder.
    weights = MNASNet0_5_Weights.verify(weights)

    return _mnasnet(0.5, weights, progress, **kwargs)
351
+
352
+
353
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_75_Weights.IMAGENET1K_V1))
def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.75 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_75_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet0_75_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet0_75_Weights
        :members:
    """
    # Normalize/validate the weights argument, then delegate to the shared builder.
    weights = MNASNet0_75_Weights.verify(weights)

    return _mnasnet(0.75, weights, progress, **kwargs)
379
+
380
+
381
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_0_Weights.IMAGENET1K_V1))
def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.0 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet1_0_Weights
        :members:
    """
    # Normalize/validate the weights argument, then delegate to the shared builder.
    weights = MNASNet1_0_Weights.verify(weights)

    return _mnasnet(1.0, weights, progress, **kwargs)
407
+
408
+
409
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_3_Weights.IMAGENET1K_V1))
def mnasnet1_3(*, weights: Optional[MNASNet1_3_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.3 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MNASNet1_3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mnasnet.MNASNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MNASNet1_3_Weights
        :members:
    """
    # Normalize/validate the weights argument, then delegate to the shared builder.
    weights = MNASNet1_3_Weights.verify(weights)

    return _mnasnet(1.3, weights, progress, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/mobilenet.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Compatibility module re-exporting both MobileNet generations under one namespace.
from .mobilenetv2 import *  # noqa: F401, F403
from .mobilenetv3 import *  # noqa: F401, F403
from .mobilenetv2 import __all__ as mv2_all
from .mobilenetv3 import __all__ as mv3_all

# Combined public API of mobilenetv2 and mobilenetv3.
__all__ = mv2_all + mv3_all
pllava/lib/python3.10/site-packages/torchvision/models/mobilenetv3.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Callable, List, Optional, Sequence
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+
7
+ from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
8
+ from ..transforms._presets import ImageClassification
9
+ from ..utils import _log_api_usage_once
10
+ from ._api import register_model, Weights, WeightsEnum
11
+ from ._meta import _IMAGENET_CATEGORIES
12
+ from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
13
+
14
+
15
+ __all__ = [
16
+ "MobileNetV3",
17
+ "MobileNet_V3_Large_Weights",
18
+ "MobileNet_V3_Small_Weights",
19
+ "mobilenet_v3_large",
20
+ "mobilenet_v3_small",
21
+ ]
22
+
23
+
24
class InvertedResidualConfig:
    """Configuration of one inverted-residual block, as listed in Tables 1 and 2
    of the MobileNetV3 paper."""

    def __init__(
        self,
        input_channels: int,
        kernel: int,
        expanded_channels: int,
        out_channels: int,
        use_se: bool,
        activation: str,
        stride: int,
        dilation: int,
        width_mult: float,
    ):
        # Every channel count is scaled by the width multiplier and rounded
        # to a multiple of 8.
        scale = partial(self.adjust_channels, width_mult=width_mult)
        self.input_channels = scale(input_channels)
        self.kernel = kernel
        self.expanded_channels = scale(expanded_channels)
        self.out_channels = scale(out_channels)
        self.use_se = use_se
        # "HS" selects hard-swish; any other value ("RE") selects ReLU.
        self.use_hs = activation == "HS"
        self.stride = stride
        self.dilation = dilation

    @staticmethod
    def adjust_channels(channels: int, width_mult: float):
        """Scale ``channels`` by ``width_mult``, rounded to a multiple of 8."""
        return _make_divisible(channels * width_mult, 8)
50
+
51
+
52
class InvertedResidual(nn.Module):
    """MobileNetV3 inverted-residual block (section 5 of the paper):
    1x1 expansion, depthwise conv, optional squeeze-excitation, 1x1 projection."""

    def __init__(
        self,
        cnf: InvertedResidualConfig,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid),
    ):
        super().__init__()
        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        # A residual shortcut is only valid when the block preserves shape.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        act = nn.Hardswish if cnf.use_hs else nn.ReLU
        modules: List[nn.Module] = []

        # 1x1 expansion (skipped when the expansion ratio is 1).
        if cnf.expanded_channels != cnf.input_channels:
            modules.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=act,
                )
            )

        # Depthwise convolution; stride collapses to 1 when dilation is used.
        modules.append(
            Conv2dNormActivation(
                cnf.expanded_channels,
                cnf.expanded_channels,
                kernel_size=cnf.kernel,
                stride=1 if cnf.dilation > 1 else cnf.stride,
                dilation=cnf.dilation,
                groups=cnf.expanded_channels,
                norm_layer=norm_layer,
                activation_layer=act,
            )
        )
        # Optional squeeze-and-excitation on the expanded representation.
        if cnf.use_se:
            modules.append(se_layer(cnf.expanded_channels, _make_divisible(cnf.expanded_channels // 4, 8)))

        # 1x1 linear projection -- no activation on purpose.
        modules.append(
            Conv2dNormActivation(
                cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*modules)
        self.out_channels = cnf.out_channels
        self._is_cn = cnf.stride > 1

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result
115
+
116
+
117
class MobileNetV3(nn.Module):
    # Full MobileNetV3 backbone + classifier head; the stage layout is supplied
    # via `inverted_residual_setting` (see `_mobilenet_v3_conf`).
    def __init__(
        self,
        inverted_residual_setting: List[InvertedResidualConfig],
        last_channel: int,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
        **kwargs: Any,
    ) -> None:
        """
        MobileNet V3 main class

        Args:
            inverted_residual_setting (List[InvertedResidualConfig]): Network structure
            last_channel (int): The number of channels on the penultimate layer
            num_classes (int): Number of classes
            block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The droupout probability

        Raises:
            ValueError: if ``inverted_residual_setting`` is empty.
            TypeError: if ``inverted_residual_setting`` is not a sequence of
                ``InvertedResidualConfig``.
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            # Defaults chosen to match the reference TF implementation.
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3,
                firstconv_output_channels,
                kernel_size=3,
                stride=2,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        )

        # building inverted residual blocks
        for cnf in inverted_residual_setting:
            layers.append(block(cnf, norm_layer))

        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        # The final conv expands by a fixed factor of 6, per the paper.
        lastconv_output_channels = 6 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(lastconv_output_channels, last_channel),
            nn.Hardswish(inplace=True),
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(last_channel, num_classes),
        )

        # Weight initialization mirrors the reference implementation.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # Backbone -> global average pool -> flatten -> classifier head.
        x = self.features(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
221
+
222
+
223
def _mobilenet_v3_conf(
    arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, **kwargs: Any
):
    """Return the (inverted_residual_setting, last_channel) pair for the given
    MobileNetV3 variant ("mobilenet_v3_large" or "mobilenet_v3_small").

    `reduced_tail` halves the channels of the last stages; `dilated` replaces
    the last stride-2 stages with dilation (useful for dense prediction).
    Extra **kwargs are accepted and ignored here.

    Raises:
        ValueError: if ``arch`` is not one of the two supported names.
    """
    reduce_divider = 2 if reduced_tail else 1
    dilation = 2 if dilated else 1

    bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
    adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)

    if arch == "mobilenet_v3_large":
        # Columns: input_ch, kernel, expanded_ch, out_ch, use_se, activation, stride, dilation
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
            bneck_conf(16, 3, 64, 24, False, "RE", 2, 1),  # C1
            bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 72, 40, True, "RE", 2, 1),  # C2
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 3, 240, 80, False, "HS", 2, 1),  # C3
            bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
            bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
            bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1280 // reduce_divider)  # C5
    elif arch == "mobilenet_v3_small":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, True, "RE", 2, 1),  # C1
            bneck_conf(16, 3, 72, 24, False, "RE", 2, 1),  # C2
            bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 96, 40, True, "HS", 2, 1),  # C3
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1024 // reduce_divider)  # C5
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel
270
+
271
+
272
def _mobilenet_v3(
    inverted_residual_setting: List[InvertedResidualConfig],
    last_channel: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> MobileNetV3:
    """Instantiate a MobileNetV3 from a stage config, optionally loading
    pretrained weights into it."""
    if weights is not None:
        # A pretrained checkpoint dictates the classifier size.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
    if weights is not None:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)
    return model
288
+
289
+
290
# Metadata shared by every MobileNetV3 weight entry below.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}
294
+
295
+
296
class MobileNet_V3_Large_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the large MobileNetV3 variant."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.042,
                    "acc@5": 91.340,
                }
            },
            "_ops": 0.217,
            "_file_size": 21.114,
            "_docs": """These weights were trained from scratch by using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.274,
                    "acc@5": 92.566,
                }
            },
            "_ops": 0.217,
            "_file_size": 21.107,
            "_docs": """
                These weights improve marginally upon the results of the original paper by using a modified version of
                TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # V2 is the more accurate checkpoint, hence the default.
    DEFAULT = IMAGENET1K_V2
338
+
339
+
340
class MobileNet_V3_Small_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the small MobileNetV3 variant."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2542856,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 67.668,
                    "acc@5": 87.402,
                }
            },
            "_ops": 0.057,
            "_file_size": 9.829,
            "_docs": """
                These weights improve upon the results of the original paper by using a simple training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
362
+
363
+
364
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.IMAGENET1K_V1))
def mobilenet_v3_large(
    *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a large MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Large_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
        :members:
    """
    weights = MobileNet_V3_Large_Weights.verify(weights)

    # kwargs are consumed both by the conf builder (width_mult, reduced_tail,
    # dilated) and by the MobileNetV3 constructor.
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
393
+
394
+
395
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.IMAGENET1K_V1))
def mobilenet_v3_small(
    *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a small MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Small_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Small_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Small_Weights
        :members:
    """
    weights = MobileNet_V3_Small_Weights.verify(weights)

    # kwargs are consumed both by the conf builder (width_mult, reduced_tail,
    # dilated) and by the MobileNetV3 constructor.
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .raft import *
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/raft.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/_utils.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import Tensor
6
+
7
+
8
def grid_sample(img: Tensor, absolute_grid: Tensor, mode: str = "bilinear", align_corners: Optional[bool] = None):
    """Wrapper around ``F.grid_sample`` that accepts absolute pixel coordinates.

    The last dimension of ``absolute_grid`` holds (x, y) pixel positions; they are
    rescaled here into the [-1, 1] range that ``torch.nn.functional.grid_sample``
    expects before delegating to it.
    """
    height, width = img.shape[-2], img.shape[-1]

    x_coords = absolute_grid[..., 0:1]
    y_coords = absolute_grid[..., 1:2]

    x_coords = 2 * x_coords / (width - 1) - 1
    # Only normalize y when there is more than one row, so this helper can also be
    # reused for raft-stereo (avoids a division by zero when height == 1).
    if height > 1:
        y_coords = 2 * y_coords / (height - 1) - 1

    return F.grid_sample(img, torch.cat((x_coords, y_coords), dim=-1), mode=mode, align_corners=align_corners)
20
+
21
+
22
def make_coords_grid(batch_size: int, h: int, w: int, device: str = "cpu"):
    """Return a ``(batch_size, 2, h, w)`` grid of pixel coordinates.

    Channel 0 holds the x (column) coordinate and channel 1 the y (row)
    coordinate, matching the (x, y) flow convention.
    """
    dev = torch.device(device)
    rows, cols = torch.meshgrid(torch.arange(h, device=dev), torch.arange(w, device=dev), indexing="ij")
    # Stack with x first (hence the (cols, rows) order), then replicate per batch item.
    grid = torch.stack((cols, rows), dim=0).float()
    return grid[None].repeat(batch_size, 1, 1, 1)
27
+
28
+
29
def upsample_flow(flow, up_mask: Optional[Tensor] = None, factor: int = 8):
    """Upsample ``flow`` spatially by ``factor`` (default 8).

    Without ``up_mask`` this is plain bilinear interpolation; the flow values are
    also multiplied by ``factor`` since they are expressed in pixels. With
    ``up_mask`` each fine pixel is instead a convex combination of the 9 coarse
    neighbors — see the RAFT paper page 8 and appendix B (where the picture
    assumes a downsample factor of 4 instead of 8).
    """
    batch_size, num_channels, h, w = flow.shape
    upsampled_size = (h * factor, w * factor)

    if up_mask is None:
        return factor * F.interpolate(flow, size=upsampled_size, mode="bilinear", align_corners=True)

    # Softmax over the 9-neighbor axis => "convex" weights (they sum to 1) for each
    # of the (factor x factor) sub-pixel positions of each coarse pixel.
    weights = torch.softmax(up_mask.view(batch_size, 1, 9, factor, factor, h, w), dim=2)

    neighbors = F.unfold(factor * flow, kernel_size=3, padding=1)
    neighbors = neighbors.view(batch_size, num_channels, 9, 1, 1, h, w)

    combined = (weights * neighbors).sum(dim=2)
    # (B, C, factor, factor, h, w) -> (B, C, h, factor, w, factor) -> (B, C, H, W)
    return combined.permute(0, 1, 4, 2, 5, 3).reshape(batch_size, num_channels, *upsampled_size)
pllava/lib/python3.10/site-packages/torchvision/models/optical_flow/raft.py ADDED
@@ -0,0 +1,947 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch import Tensor
7
+ from torch.nn.modules.batchnorm import BatchNorm2d
8
+ from torch.nn.modules.instancenorm import InstanceNorm2d
9
+ from torchvision.ops import Conv2dNormActivation
10
+
11
+ from ...transforms._presets import OpticalFlow
12
+ from ...utils import _log_api_usage_once
13
+ from .._api import register_model, Weights, WeightsEnum
14
+ from .._utils import handle_legacy_interface
15
+ from ._utils import grid_sample, make_coords_grid, upsample_flow
16
+
17
+
18
+ __all__ = (
19
+ "RAFT",
20
+ "raft_large",
21
+ "raft_small",
22
+ "Raft_Large_Weights",
23
+ "Raft_Small_Weights",
24
+ )
25
+
26
+
27
class ResidualBlock(nn.Module):
    """Slightly modified Residual block with extra relu and biases."""

    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1, always_project: bool = False):
        super().__init__()

        # bias=True even though a norm layer follows: in the RAFT training reference
        # the BatchNorm2d layers are only active for the first dataset and are frozen
        # (eval mode) afterwards, so the conv bias stays useful there. Keeping biases
        # everywhere also lets us load the original weights.
        self.convnormrelu1 = Conv2dNormActivation(
            in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu2 = Conv2dNormActivation(
            out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
        )

        # make mypy happy
        self.downsample: nn.Module

        # Project the shortcut only when the shapes would otherwise mismatch
        # (or when the caller explicitly asks for it).
        needs_projection = stride != 1 or always_project
        if needs_projection:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
        else:
            self.downsample = nn.Identity()

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = self.convnormrelu2(self.convnormrelu1(x))
        shortcut = self.downsample(x)
        return self.relu(shortcut + residual)
72
+
73
+
74
class BottleneckBlock(nn.Module):
    """Slightly modified BottleNeck block (extra relu and biases)"""

    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
        super().__init__()

        # Classic bottleneck: squeeze to out_channels // 4, process, expand back.
        mid_channels = out_channels // 4

        # See note in ResidualBlock for the reason behind bias=True
        self.convnormrelu1 = Conv2dNormActivation(
            in_channels, mid_channels, norm_layer=norm_layer, kernel_size=1, bias=True
        )
        self.convnormrelu2 = Conv2dNormActivation(
            mid_channels, mid_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu3 = Conv2dNormActivation(
            mid_channels, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True
        )
        self.relu = nn.ReLU(inplace=True)

        # The shortcut needs a projection only when the spatial size changes.
        if stride != 1:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
        else:
            self.downsample = nn.Identity()

    def forward(self, x):
        residual = self.convnormrelu3(self.convnormrelu2(self.convnormrelu1(x)))
        shortcut = self.downsample(x)
        return self.relu(shortcut + residual)
114
+
115
+
116
class FeatureEncoder(nn.Module):
    """The feature encoder, used both as the actual feature encoder, and as the context encoder.

    It must downsample its input by 8.
    """

    def __init__(
        self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), strides=(2, 1, 2, 2), norm_layer=nn.BatchNorm2d
    ):
        super().__init__()

        if len(layers) != 5:
            raise ValueError(f"The expected number of layers is 5, instead got {len(layers)}")

        # See note in ResidualBlock for the reason behind bias=True
        self.convnormrelu = Conv2dNormActivation(
            3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=strides[0], bias=True
        )

        self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=strides[1])
        self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=strides[2])
        self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=strides[3])

        # Final 1x1 projection to the output dimension.
        self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1)

        # Kaiming init for convs, constant init for the (affine) norm layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(module, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                if module.weight is not None:
                    nn.init.constant_(module.weight, 1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

        num_downsamples = sum(1 for s in strides if s == 2)
        self.output_dim = layers[-1]
        self.downsample_factor = 2**num_downsamples

    def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride):
        # Two consecutive blocks; only the first one may downsample.
        return nn.Sequential(
            block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride),
            block(out_channels, out_channels, norm_layer=norm_layer, stride=1),
        )

    def forward(self, x):
        x = self.convnormrelu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return self.conv(x)
169
+
170
+
171
class MotionEncoder(nn.Module):
    """The motion encoder, part of the update block.

    Takes the current predicted flow and the correlation features as input and returns an encoded version of these.
    """

    def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128):
        super().__init__()

        if len(flow_layers) != 2:
            raise ValueError(f"The expected number of flow_layers is 2, instead got {len(flow_layers)}")
        if len(corr_layers) not in (1, 2):
            raise ValueError(f"The number of corr_layers should be 1 or 2, instead got {len(corr_layers)}")

        # Correlation branch: one mandatory 1x1 conv, optionally followed by a 3x3 one.
        self.convcorr1 = Conv2dNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
        self.convcorr2 = (
            Conv2dNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
            if len(corr_layers) == 2
            else nn.Identity()
        )

        # Flow branch.
        self.convflow1 = Conv2dNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
        self.convflow2 = Conv2dNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)

        # out_channels - 2 because the raw flow (2 channels) is concatenated back at the end
        self.conv = Conv2dNormActivation(
            corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
        )

        self.out_channels = out_channels

    def forward(self, flow, corr_features):
        encoded_corr = self.convcorr2(self.convcorr1(corr_features))
        encoded_flow = self.convflow2(self.convflow1(flow))

        features = self.conv(torch.cat([encoded_corr, encoded_flow], dim=1))
        # Re-append the raw flow so downstream blocks keep direct access to it.
        return torch.cat([features, flow], dim=1)
212
+
213
+
214
class ConvGRU(nn.Module):
    """Convolutional Gru unit."""

    def __init__(self, *, input_size, hidden_size, kernel_size, padding):
        super().__init__()
        combined_channels = hidden_size + input_size
        # z: update gate, r: reset gate, q: candidate hidden state.
        self.convz = nn.Conv2d(combined_channels, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convr = nn.Conv2d(combined_channels, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convq = nn.Conv2d(combined_channels, hidden_size, kernel_size=kernel_size, padding=padding)

    def forward(self, h, x):
        hx = torch.cat([h, x], dim=1)
        update_gate = torch.sigmoid(self.convz(hx))
        reset_gate = torch.sigmoid(self.convr(hx))
        candidate = torch.tanh(self.convq(torch.cat([reset_gate * h, x], dim=1)))
        # Standard GRU blend between the previous hidden state and the candidate.
        return (1 - update_gate) * h + update_gate * candidate
230
+
231
+
232
+ def _pass_through_h(h, _):
233
+ # Declared here for torchscript
234
+ return h
235
+
236
+
237
class RecurrentBlock(nn.Module):
    """Recurrent block, part of the update block.

    Takes the current hidden state and the concatenation of (motion encoder output, context) as input.
    Returns an updated hidden state.
    """

    def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))):
        super().__init__()

        num_grus = len(kernel_size)
        if num_grus != len(padding):
            raise ValueError(
                f"kernel_size should have the same length as padding, instead got len(kernel_size) = {len(kernel_size)} and len(padding) = {len(padding)}"
            )
        if num_grus not in (1, 2):
            raise ValueError(f"kernel_size should either 1 or 2, instead got {len(kernel_size)}")

        self.convgru1 = ConvGRU(
            input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0]
        )
        # With a single kernel size, the second "GRU" is just a pass-through on h.
        self.convgru2 = (
            ConvGRU(input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1])
            if num_grus == 2
            else _pass_through_h
        )

        self.hidden_size = hidden_size

    def forward(self, h, x):
        return self.convgru2(self.convgru1(h, x), x)
270
+
271
+
272
class FlowHead(nn.Module):
    """Flow head, part of the update block.

    Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow".
    """

    def __init__(self, *, in_channels, hidden_size):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1)
        # 2 output channels: the (dx, dy) flow correction.
        self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
286
+
287
+
288
class UpdateBlock(nn.Module):
    """The update block which contains the motion encoder, the recurrent block, and the flow head.

    It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block.
    """

    def __init__(self, *, motion_encoder, recurrent_block, flow_head):
        super().__init__()
        self.motion_encoder = motion_encoder
        self.recurrent_block = recurrent_block
        self.flow_head = flow_head

        # Mirrored here so that RAFT can split the context encoder output accordingly.
        self.hidden_state_size = recurrent_block.hidden_size

    def forward(self, hidden_state, context, corr_features, flow):
        # Encode the current flow estimate together with the correlation features,
        # then feed (context, motion features) to the recurrent unit.
        recurrent_input = torch.cat([context, self.motion_encoder(flow, corr_features)], dim=1)
        hidden_state = self.recurrent_block(hidden_state, recurrent_input)
        return hidden_state, self.flow_head(hidden_state)
309
+
310
+
311
class MaskPredictor(nn.Module):
    """Mask predictor to be used when upsampling the predicted flow.

    It takes the hidden state of the recurrent unit as input and outputs the mask.
    This is not used in the raft-small model.
    """

    def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
        super().__init__()
        self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
        # 8 * 8 * 9 output channels: the predicted flow is downsampled by 8 (from the
        # initial FeatureEncoder), and each fine pixel is interpolated from its 9
        # coarse neighbors. See paper and appendix B.
        self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)

        # The original code uses a factor of 0.25 to "downweight the gradients" of
        # this branch. See e.g.
        # https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419
        # or https://github.com/princeton-vl/RAFT/issues/24.
        # It doesn't seem to affect epe significantly and can likely be set to 1.
        self.multiplier = multiplier

    def forward(self, x):
        mask = self.conv(self.convrelu(x))
        return self.multiplier * mask
335
+
336
+
337
class CorrBlock(nn.Module):
    """The correlation block.

    Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder,
    and then indexes from this pyramid to create correlation features.
    The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that
    are within a ``radius``, according to the infinity norm (see paper section 3.2).
    Note: typo in the paper, it should be infinity norm, not 1-norm.
    """

    def __init__(self, *, num_levels: int = 4, radius: int = 4):
        super().__init__()
        self.num_levels = num_levels
        self.radius = radius

        self.corr_pyramid: List[Tensor] = [torch.tensor(0)]  # useless, but torchscript is otherwise confused :')

        # The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius}
        # so it's a square surrounding x', and its sides have a length of 2 * radius + 1
        # The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo:
        # https://github.com/princeton-vl/RAFT/issues/122
        self.out_channels = num_levels * (2 * radius + 1) ** 2

    def build_pyramid(self, fmap1, fmap2):
        """Build the correlation pyramid from two feature maps.

        The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2)
        The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
        to build the correlation pyramid.
        """

        if fmap1.shape != fmap2.shape:
            raise ValueError(
                f"Input feature maps should have the same shape, instead got {fmap1.shape} (fmap1.shape) != {fmap2.shape} (fmap2.shape)"
            )

        # Explaining min_fmap_size below: the fmaps are down-sampled (num_levels - 1) times by a factor of 2.
        # The last corr_volume most have at least 2 values (hence the 2* factor), otherwise grid_sample() would
        # produce nans in its output.
        min_fmap_size = 2 * (2 ** (self.num_levels - 1))
        if any(fmap_size < min_fmap_size for fmap_size in fmap1.shape[-2:]):
            raise ValueError(
                "Feature maps are too small to be down-sampled by the correlation pyramid. "
                f"H and W of feature maps should be at least {min_fmap_size}; got: {fmap1.shape[-2:]}. "
                "Remember that input images to the model are downsampled by 8, so that means their "
                f"dimensions should be at least 8 * {min_fmap_size} = {8 * min_fmap_size}."
            )

        corr_volume = self._compute_corr_volume(fmap1, fmap2)

        batch_size, h, w, num_channels, _, _ = corr_volume.shape  # _, _ = h, w
        # Flatten the fmap1 pixels into the batch dimension: each "image" of the reshaped
        # volume is the correlation of one fmap1 pixel against every fmap2 pixel, so the
        # avg-pooling below only coarsens the fmap2 (last two) dimensions.
        corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w)
        self.corr_pyramid = [corr_volume]
        for _ in range(self.num_levels - 1):
            corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2)
            self.corr_pyramid.append(corr_volume)

    def index_pyramid(self, centroids_coords):
        """Return correlation features by indexing from the pyramid."""
        neighborhood_side_len = 2 * self.radius + 1  # see note in __init__ about out_channels
        # Offsets of the (side_len x side_len) square neighborhood around each centroid.
        di = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device)
        delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2)

        batch_size, _, h, w = centroids_coords.shape  # _ = 2
        centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2)

        indexed_pyramid = []
        for corr_volume in self.corr_pyramid:
            sampling_coords = centroids_coords + delta  # end shape is (batch_size * h * w, side_len, side_len, 2)
            indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
                batch_size, h, w, -1
            )
            indexed_pyramid.append(indexed_corr_volume)
            # Each pyramid level is half the resolution of the previous one, so the
            # centroid coordinates shrink by 2 before indexing the next level.
            centroids_coords = centroids_coords / 2

        corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()

        expected_output_shape = (batch_size, self.out_channels, h, w)
        if corr_features.shape != expected_output_shape:
            raise ValueError(
                f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}"
            )

        return corr_features

    def _compute_corr_volume(self, fmap1, fmap2):
        batch_size, num_channels, h, w = fmap1.shape
        fmap1 = fmap1.view(batch_size, num_channels, h * w)
        fmap2 = fmap2.view(batch_size, num_channels, h * w)

        # All-pairs dot products between fmap1 and fmap2 pixels, scaled by
        # 1 / sqrt(num_channels) (same normalization as scaled dot-product attention).
        corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
        corr = corr.view(batch_size, h, w, 1, h, w)
        return corr / torch.sqrt(torch.tensor(num_channels))
432
+
433
+
434
class RAFT(nn.Module):
    def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None):
        """RAFT model from
        `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.

        args:
            feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8.
                Its input is the concatenation of ``image1`` and ``image2``.
            context_encoder (nn.Module): The context encoder. It must downsample the input by 8.
                Its input is ``image1``. As in the original implementation, its output will be split into 2 parts:

                - one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
                - one part will be used to initialize the hidden state of the recurrent unit of
                  the ``update_block``

                These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the output
                of the ``context_encoder`` must be strictly greater than ``hidden_state_size``.

            corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the
                ``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose
                2 methods:

                - a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the
                  output of the ``feature_encoder``).
                - a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns
                  the correlation features. See paper section 3.2.

                It must expose an ``out_channels`` attribute.

            update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the
                flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation
                features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow``
                prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute.
            mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
                The output channel must be 8 * 8 * 9 - see paper section 3.3, and Appendix B.
                If ``None`` (default), the flow is upsampled using interpolation.
        """
        super().__init__()
        _log_api_usage_once(self)

        self.feature_encoder = feature_encoder
        self.context_encoder = context_encoder
        self.corr_block = corr_block
        self.update_block = update_block

        self.mask_predictor = mask_predictor

        # Fail early with a clear message rather than with an AttributeError in forward().
        if not hasattr(self.update_block, "hidden_state_size"):
            raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")

    def forward(self, image1, image2, num_flow_updates: int = 12):
        """Estimate the optical flow from ``image1`` to ``image2``.

        Returns a list of ``num_flow_updates`` flow predictions of shape
        ``(batch_size, 2, H, W)``, each one a refinement of the previous; the last
        element is the most accurate prediction.
        """

        batch_size, _, h, w = image1.shape
        if (h, w) != image2.shape[-2:]:
            raise ValueError(f"input images should have the same shape, instead got ({h}, {w}) != {image2.shape[-2:]}")
        # Bug fix: the previous check was `not (h % 8 == 0) and (w % 8 == 0)`.
        # Since `not` binds tighter than `and`, it only raised when h was NOT
        # divisible by 8 while w WAS. Both dimensions must be divisible by 8
        # because the encoders downsample by 8.
        if h % 8 != 0 or w % 8 != 0:
            raise ValueError(f"input image H and W should be divisible by 8, instead got {h} (h) and {w} (w)")

        # Run both images through the feature encoder in a single batch, then split.
        fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0))
        fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
        if fmap1.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The feature encoder should downsample H and W by 8")

        self.corr_block.build_pyramid(fmap1, fmap2)

        context_out = self.context_encoder(image1)
        if context_out.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The context encoder should downsample H and W by 8")

        # As in the original paper, the actual output of the context encoder is split in 2 parts:
        # - one part is used to initialize the hidden state of the recurent units of the update block
        # - the rest is the "actual" context.
        hidden_state_size = self.update_block.hidden_state_size
        out_channels_context = context_out.shape[1] - hidden_state_size
        if out_channels_context <= 0:
            raise ValueError(
                f"The context encoder outputs {context_out.shape[1]} channels, but it should have at strictly more than hidden_state={hidden_state_size} channels"
            )
        hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1)
        hidden_state = torch.tanh(hidden_state)
        context = F.relu(context)

        # coords0 stays fixed (identity grid); coords1 is iteratively refined.
        # The flow estimate at any point is coords1 - coords0.
        coords0 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
        coords1 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)

        flow_predictions = []
        for _ in range(num_flow_updates):
            coords1 = coords1.detach()  # Don't backpropagate gradients through this branch, see paper
            corr_features = self.corr_block.index_pyramid(centroids_coords=coords1)

            flow = coords1 - coords0
            hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow)

            coords1 = coords1 + delta_flow

            # Upsample the (1/8 resolution) flow back to full resolution, either with the
            # learned convex-combination mask or with plain interpolation.
            up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
            upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask)
            flow_predictions.append(upsampled_flow)

        return flow_predictions
+ return flow_predictions
534
+
535
+
536
# Metadata shared by all RAFT weight entries below.
_COMMON_META = {
    "min_size": (128, 128),
}
539
+
540
+
541
class Raft_Large_Weights(WeightsEnum):
    """The metrics reported here are as follows.

    ``epe`` is the "end-point-error" and indicates how far (in pixels) the
    predicted flow is from its true value. This is averaged over all pixels
    of all images. ``per_image_epe`` is similar, but the average is different:
    the epe is first computed on each image independently, and then averaged
    over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
    in the original paper, and it's only used on Kitti. ``fl-all`` is also a
    Kitti-specific metric, defined by the author of the dataset and used for the
    Kitti leaderboard. It corresponds to the average of pixels whose epe is
    either <3px, or <5% of flow's 2-norm.
    """

    # Entry names encode the training datasets (see each entry's "_docs"):
    # C = FlyingChairs, T = FlyingThings3D, S = Sintel, K = Kitti, H = HD1K.
    # V1 entries were ported from the original repo; V2 entries were retrained
    # with the torchvision recipe (see each entry's "recipe" URL).
    C_T_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_V1-22a6c225.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.4411},
                "Sintel-Train-Finalpass": {"epe": 2.7894},
                "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were ported from the original paper. They
            are trained on :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )

    C_T_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_V2-1bb1363a.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.3822},
                "Sintel-Train-Finalpass": {"epe": 2.7161},
                "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were trained from scratch on
            :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )

    C_T_SKHT_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V1-0b8c9e55.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.94},
                "Sintel-Test-Finalpass": {"epe": 3.18},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and fine-tuned on
                Sintel. The Sintel fine-tuning step is a combination of
                :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )

    C_T_SKHT_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V2-ff5fadd5.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.819},
                "Sintel-Test-Finalpass": {"epe": 3.067},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and then
                fine-tuned on Sintel. The Sintel fine-tuning step is a
                combination of :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )

    C_T_SKHT_K_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V1-4a6a5039.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.10},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )

    C_T_SKHT_K_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V2-b5c70766.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.19},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )

    DEFAULT = C_T_SKHT_V2
697
+
698
+
699
+ class Raft_Small_Weights(WeightsEnum):
700
+ """The metrics reported here are as follows.
701
+
702
+ ``epe`` is the "end-point-error" and indicates how far (in pixels) the
703
+ predicted flow is from its true value. This is averaged over all pixels
704
+ of all images. ``per_image_epe`` is similar, but the average is different:
705
+ the epe is first computed on each image independently, and then averaged
706
+ over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
707
+ in the original paper, and it's only used on Kitti. ``fl-all`` is also a
708
+ Kitti-specific metric, defined by the author of the dataset and used for the
709
+ Kitti leaderboard. It corresponds to the average of pixels whose epe is
710
+ either <3px, or <5% of flow's 2-norm.
711
+ """
712
+
713
+ C_T_V1 = Weights(
714
+ # Weights ported from https://github.com/princeton-vl/RAFT
715
+ url="https://download.pytorch.org/models/raft_small_C_T_V1-ad48884c.pth",
716
+ transforms=OpticalFlow,
717
+ meta={
718
+ **_COMMON_META,
719
+ "num_params": 990162,
720
+ "recipe": "https://github.com/princeton-vl/RAFT",
721
+ "_metrics": {
722
+ "Sintel-Train-Cleanpass": {"epe": 2.1231},
723
+ "Sintel-Train-Finalpass": {"epe": 3.2790},
724
+ "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
725
+ },
726
+ "_ops": 47.655,
727
+ "_file_size": 3.821,
728
+ "_docs": """These weights were ported from the original paper. They
729
+ are trained on :class:`~torchvision.datasets.FlyingChairs` +
730
+ :class:`~torchvision.datasets.FlyingThings3D`.""",
731
+ },
732
+ )
733
+ C_T_V2 = Weights(
734
+ url="https://download.pytorch.org/models/raft_small_C_T_V2-01064c6d.pth",
735
+ transforms=OpticalFlow,
736
+ meta={
737
+ **_COMMON_META,
738
+ "num_params": 990162,
739
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
740
+ "_metrics": {
741
+ "Sintel-Train-Cleanpass": {"epe": 1.9901},
742
+ "Sintel-Train-Finalpass": {"epe": 3.2831},
743
+ "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
744
+ },
745
+ "_ops": 47.655,
746
+ "_file_size": 3.821,
747
+ "_docs": """These weights were trained from scratch on
748
+ :class:`~torchvision.datasets.FlyingChairs` +
749
+ :class:`~torchvision.datasets.FlyingThings3D`.""",
750
+ },
751
+ )
752
+
753
+ DEFAULT = C_T_V2
754
+
755
+
756
+ def _raft(
757
+ *,
758
+ weights=None,
759
+ progress=False,
760
+ # Feature encoder
761
+ feature_encoder_layers,
762
+ feature_encoder_block,
763
+ feature_encoder_norm_layer,
764
+ # Context encoder
765
+ context_encoder_layers,
766
+ context_encoder_block,
767
+ context_encoder_norm_layer,
768
+ # Correlation block
769
+ corr_block_num_levels,
770
+ corr_block_radius,
771
+ # Motion encoder
772
+ motion_encoder_corr_layers,
773
+ motion_encoder_flow_layers,
774
+ motion_encoder_out_channels,
775
+ # Recurrent block
776
+ recurrent_block_hidden_state_size,
777
+ recurrent_block_kernel_size,
778
+ recurrent_block_padding,
779
+ # Flow Head
780
+ flow_head_hidden_size,
781
+ # Mask predictor
782
+ use_mask_predictor,
783
+ **kwargs,
784
+ ):
785
+ feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
786
+ block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer
787
+ )
788
+ context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder(
789
+ block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer
790
+ )
791
+
792
+ corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius)
793
+
794
+ update_block = kwargs.pop("update_block", None)
795
+ if update_block is None:
796
+ motion_encoder = MotionEncoder(
797
+ in_channels_corr=corr_block.out_channels,
798
+ corr_layers=motion_encoder_corr_layers,
799
+ flow_layers=motion_encoder_flow_layers,
800
+ out_channels=motion_encoder_out_channels,
801
+ )
802
+
803
+ # See comments in forward pass of RAFT class about why we split the output of the context encoder
804
+ out_channels_context = context_encoder_layers[-1] - recurrent_block_hidden_state_size
805
+ recurrent_block = RecurrentBlock(
806
+ input_size=motion_encoder.out_channels + out_channels_context,
807
+ hidden_size=recurrent_block_hidden_state_size,
808
+ kernel_size=recurrent_block_kernel_size,
809
+ padding=recurrent_block_padding,
810
+ )
811
+
812
+ flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size)
813
+
814
+ update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)
815
+
816
+ mask_predictor = kwargs.pop("mask_predictor", None)
817
+ if mask_predictor is None and use_mask_predictor:
818
+ mask_predictor = MaskPredictor(
819
+ in_channels=recurrent_block_hidden_state_size,
820
+ hidden_size=256,
821
+ multiplier=0.25, # See comment in MaskPredictor about this
822
+ )
823
+
824
+ model = RAFT(
825
+ feature_encoder=feature_encoder,
826
+ context_encoder=context_encoder,
827
+ corr_block=corr_block,
828
+ update_block=update_block,
829
+ mask_predictor=mask_predictor,
830
+ **kwargs, # not really needed, all params should be consumed by now
831
+ )
832
+
833
+ if weights is not None:
834
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
835
+
836
+ return model
837
+
838
+
839
+ @register_model()
840
+ @handle_legacy_interface(weights=("pretrained", Raft_Large_Weights.C_T_SKHT_V2))
841
+ def raft_large(*, weights: Optional[Raft_Large_Weights] = None, progress=True, **kwargs) -> RAFT:
842
+ """RAFT model from
843
+ `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
844
+
845
+ Please see the example below for a tutorial on how to use this model.
846
+
847
+ Args:
848
+ weights(:class:`~torchvision.models.optical_flow.Raft_Large_Weights`, optional): The
849
+ pretrained weights to use. See
850
+ :class:`~torchvision.models.optical_flow.Raft_Large_Weights`
851
+ below for more details, and possible values. By default, no
852
+ pre-trained weights are used.
853
+ progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
854
+ **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
855
+ base class. Please refer to the `source code
856
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
857
+ for more details about this class.
858
+
859
+ .. autoclass:: torchvision.models.optical_flow.Raft_Large_Weights
860
+ :members:
861
+ """
862
+
863
+ weights = Raft_Large_Weights.verify(weights)
864
+
865
+ return _raft(
866
+ weights=weights,
867
+ progress=progress,
868
+ # Feature encoder
869
+ feature_encoder_layers=(64, 64, 96, 128, 256),
870
+ feature_encoder_block=ResidualBlock,
871
+ feature_encoder_norm_layer=InstanceNorm2d,
872
+ # Context encoder
873
+ context_encoder_layers=(64, 64, 96, 128, 256),
874
+ context_encoder_block=ResidualBlock,
875
+ context_encoder_norm_layer=BatchNorm2d,
876
+ # Correlation block
877
+ corr_block_num_levels=4,
878
+ corr_block_radius=4,
879
+ # Motion encoder
880
+ motion_encoder_corr_layers=(256, 192),
881
+ motion_encoder_flow_layers=(128, 64),
882
+ motion_encoder_out_channels=128,
883
+ # Recurrent block
884
+ recurrent_block_hidden_state_size=128,
885
+ recurrent_block_kernel_size=((1, 5), (5, 1)),
886
+ recurrent_block_padding=((0, 2), (2, 0)),
887
+ # Flow head
888
+ flow_head_hidden_size=256,
889
+ # Mask predictor
890
+ use_mask_predictor=True,
891
+ **kwargs,
892
+ )
893
+
894
+
895
+ @register_model()
896
+ @handle_legacy_interface(weights=("pretrained", Raft_Small_Weights.C_T_V2))
897
+ def raft_small(*, weights: Optional[Raft_Small_Weights] = None, progress=True, **kwargs) -> RAFT:
898
+ """RAFT "small" model from
899
+ `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`__.
900
+
901
+ Please see the example below for a tutorial on how to use this model.
902
+
903
+ Args:
904
+ weights(:class:`~torchvision.models.optical_flow.Raft_Small_Weights`, optional): The
905
+ pretrained weights to use. See
906
+ :class:`~torchvision.models.optical_flow.Raft_Small_Weights`
907
+ below for more details, and possible values. By default, no
908
+ pre-trained weights are used.
909
+ progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
910
+ **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
911
+ base class. Please refer to the `source code
912
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
913
+ for more details about this class.
914
+
915
+ .. autoclass:: torchvision.models.optical_flow.Raft_Small_Weights
916
+ :members:
917
+ """
918
+ weights = Raft_Small_Weights.verify(weights)
919
+
920
+ return _raft(
921
+ weights=weights,
922
+ progress=progress,
923
+ # Feature encoder
924
+ feature_encoder_layers=(32, 32, 64, 96, 128),
925
+ feature_encoder_block=BottleneckBlock,
926
+ feature_encoder_norm_layer=InstanceNorm2d,
927
+ # Context encoder
928
+ context_encoder_layers=(32, 32, 64, 96, 160),
929
+ context_encoder_block=BottleneckBlock,
930
+ context_encoder_norm_layer=None,
931
+ # Correlation block
932
+ corr_block_num_levels=4,
933
+ corr_block_radius=3,
934
+ # Motion encoder
935
+ motion_encoder_corr_layers=(96,),
936
+ motion_encoder_flow_layers=(64, 32),
937
+ motion_encoder_out_channels=82,
938
+ # Recurrent block
939
+ recurrent_block_hidden_state_size=96,
940
+ recurrent_block_kernel_size=(3,),
941
+ recurrent_block_padding=(1,),
942
+ # Flow head
943
+ flow_head_hidden_size=128,
944
+ # Mask predictor
945
+ use_mask_predictor=False,
946
+ **kwargs,
947
+ )
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .googlenet import *
2
+ from .inception import *
3
+ from .mobilenet import *
4
+ from .resnet import *
5
+ from .shufflenetv2 import *
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (293 Bytes). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/googlenet.cpython-310.pyc ADDED
Binary file (8.07 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenet.cpython-310.pyc ADDED
Binary file (307 Bytes). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv2.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv3.cpython-310.pyc ADDED
Binary file (8.58 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/resnet.cpython-310.pyc ADDED
Binary file (15.1 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/shufflenetv2.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/quantization/googlenet.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, Optional, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch import Tensor
8
+ from torch.nn import functional as F
9
+
10
+ from ...transforms._presets import ImageClassification
11
+ from .._api import register_model, Weights, WeightsEnum
12
+ from .._meta import _IMAGENET_CATEGORIES
13
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
14
+ from ..googlenet import BasicConv2d, GoogLeNet, GoogLeNet_Weights, GoogLeNetOutputs, Inception, InceptionAux
15
+ from .utils import _fuse_modules, _replace_relu, quantize_model
16
+
17
+
18
+ __all__ = [
19
+ "QuantizableGoogLeNet",
20
+ "GoogLeNet_QuantizedWeights",
21
+ "googlenet",
22
+ ]
23
+
24
+
25
+ class QuantizableBasicConv2d(BasicConv2d):
26
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
27
+ super().__init__(*args, **kwargs)
28
+ self.relu = nn.ReLU()
29
+
30
+ def forward(self, x: Tensor) -> Tensor:
31
+ x = self.conv(x)
32
+ x = self.bn(x)
33
+ x = self.relu(x)
34
+ return x
35
+
36
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
37
+ _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
38
+
39
+
40
+ class QuantizableInception(Inception):
41
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
42
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
43
+ self.cat = nn.quantized.FloatFunctional()
44
+
45
+ def forward(self, x: Tensor) -> Tensor:
46
+ outputs = self._forward(x)
47
+ return self.cat.cat(outputs, 1)
48
+
49
+
50
+ class QuantizableInceptionAux(InceptionAux):
51
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
52
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
53
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
54
+ self.relu = nn.ReLU()
55
+
56
+ def forward(self, x: Tensor) -> Tensor:
57
+ # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
58
+ x = F.adaptive_avg_pool2d(x, (4, 4))
59
+ # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
60
+ x = self.conv(x)
61
+ # N x 128 x 4 x 4
62
+ x = torch.flatten(x, 1)
63
+ # N x 2048
64
+ x = self.relu(self.fc1(x))
65
+ # N x 1024
66
+ x = self.dropout(x)
67
+ # N x 1024
68
+ x = self.fc2(x)
69
+ # N x 1000 (num_classes)
70
+
71
+ return x
72
+
73
+
74
+ class QuantizableGoogLeNet(GoogLeNet):
75
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
76
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
77
+ super().__init__( # type: ignore[misc]
78
+ *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
79
+ )
80
+ self.quant = torch.ao.quantization.QuantStub()
81
+ self.dequant = torch.ao.quantization.DeQuantStub()
82
+
83
+ def forward(self, x: Tensor) -> GoogLeNetOutputs:
84
+ x = self._transform_input(x)
85
+ x = self.quant(x)
86
+ x, aux1, aux2 = self._forward(x)
87
+ x = self.dequant(x)
88
+ aux_defined = self.training and self.aux_logits
89
+ if torch.jit.is_scripting():
90
+ if not aux_defined:
91
+ warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
92
+ return GoogLeNetOutputs(x, aux2, aux1)
93
+ else:
94
+ return self.eager_outputs(x, aux2, aux1)
95
+
96
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
97
+ r"""Fuse conv/bn/relu modules in googlenet model
98
+
99
+ Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
100
+ Model is modified in place. Note that this operation does not change numerics
101
+ and the model after modification is in floating point
102
+ """
103
+
104
+ for m in self.modules():
105
+ if type(m) is QuantizableBasicConv2d:
106
+ m.fuse_model(is_qat)
107
+
108
+
109
+ class GoogLeNet_QuantizedWeights(WeightsEnum):
110
+ IMAGENET1K_FBGEMM_V1 = Weights(
111
+ url="https://download.pytorch.org/models/quantized/googlenet_fbgemm-c81f6644.pth",
112
+ transforms=partial(ImageClassification, crop_size=224),
113
+ meta={
114
+ "num_params": 6624904,
115
+ "min_size": (15, 15),
116
+ "categories": _IMAGENET_CATEGORIES,
117
+ "backend": "fbgemm",
118
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
119
+ "unquantized": GoogLeNet_Weights.IMAGENET1K_V1,
120
+ "_metrics": {
121
+ "ImageNet-1K": {
122
+ "acc@1": 69.826,
123
+ "acc@5": 89.404,
124
+ }
125
+ },
126
+ "_ops": 1.498,
127
+ "_file_size": 12.618,
128
+ "_docs": """
129
+ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
130
+ weights listed below.
131
+ """,
132
+ },
133
+ )
134
+ DEFAULT = IMAGENET1K_FBGEMM_V1
135
+
136
+
137
+ @register_model(name="quantized_googlenet")
138
+ @handle_legacy_interface(
139
+ weights=(
140
+ "pretrained",
141
+ lambda kwargs: GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
142
+ if kwargs.get("quantize", False)
143
+ else GoogLeNet_Weights.IMAGENET1K_V1,
144
+ )
145
+ )
146
+ def googlenet(
147
+ *,
148
+ weights: Optional[Union[GoogLeNet_QuantizedWeights, GoogLeNet_Weights]] = None,
149
+ progress: bool = True,
150
+ quantize: bool = False,
151
+ **kwargs: Any,
152
+ ) -> QuantizableGoogLeNet:
153
+ """GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.
154
+
155
+ .. note::
156
+ Note that ``quantize = True`` returns a quantized model with 8 bit
157
+ weights. Quantized models only support inference and run on CPUs.
158
+ GPU inference is not yet supported.
159
+
160
+ Args:
161
+ weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
162
+ pretrained weights for the model. See
163
+ :class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
164
+ more details, and possible values. By default, no pre-trained
165
+ weights are used.
166
+ progress (bool, optional): If True, displays a progress bar of the
167
+ download to stderr. Default is True.
168
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
169
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
170
+ base class. Please refer to the `source code
171
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
172
+ for more details about this class.
173
+
174
+ .. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
175
+ :members:
176
+
177
+ .. autoclass:: torchvision.models.GoogLeNet_Weights
178
+ :members:
179
+ :noindex:
180
+ """
181
+ weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)
182
+
183
+ original_aux_logits = kwargs.get("aux_logits", False)
184
+ if weights is not None:
185
+ if "transform_input" not in kwargs:
186
+ _ovewrite_named_param(kwargs, "transform_input", True)
187
+ _ovewrite_named_param(kwargs, "aux_logits", True)
188
+ _ovewrite_named_param(kwargs, "init_weights", False)
189
+ _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
190
+ if "backend" in weights.meta:
191
+ _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
192
+ backend = kwargs.pop("backend", "fbgemm")
193
+
194
+ model = QuantizableGoogLeNet(**kwargs)
195
+ _replace_relu(model)
196
+ if quantize:
197
+ quantize_model(model, backend)
198
+
199
+ if weights is not None:
200
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
201
+ if not original_aux_logits:
202
+ model.aux_logits = False
203
+ model.aux1 = None # type: ignore[assignment]
204
+ model.aux2 = None # type: ignore[assignment]
205
+ else:
206
+ warnings.warn(
207
+ "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
208
+ )
209
+
210
+ return model
pllava/lib/python3.10/site-packages/torchvision/models/quantization/inception.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, List, Optional, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from torch import Tensor
9
+ from torchvision.models import inception as inception_module
10
+ from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs
11
+
12
+ from ...transforms._presets import ImageClassification
13
+ from .._api import register_model, Weights, WeightsEnum
14
+ from .._meta import _IMAGENET_CATEGORIES
15
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
16
+ from .utils import _fuse_modules, _replace_relu, quantize_model
17
+
18
+
19
+ __all__ = [
20
+ "QuantizableInception3",
21
+ "Inception_V3_QuantizedWeights",
22
+ "inception_v3",
23
+ ]
24
+
25
+
26
+ class QuantizableBasicConv2d(inception_module.BasicConv2d):
27
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
28
+ super().__init__(*args, **kwargs)
29
+ self.relu = nn.ReLU()
30
+
31
+ def forward(self, x: Tensor) -> Tensor:
32
+ x = self.conv(x)
33
+ x = self.bn(x)
34
+ x = self.relu(x)
35
+ return x
36
+
37
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
38
+ _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
39
+
40
+
41
+ class QuantizableInceptionA(inception_module.InceptionA):
42
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
43
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
44
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
45
+ self.myop = nn.quantized.FloatFunctional()
46
+
47
+ def forward(self, x: Tensor) -> Tensor:
48
+ outputs = self._forward(x)
49
+ return self.myop.cat(outputs, 1)
50
+
51
+
52
+ class QuantizableInceptionB(inception_module.InceptionB):
53
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
54
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
55
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
56
+ self.myop = nn.quantized.FloatFunctional()
57
+
58
+ def forward(self, x: Tensor) -> Tensor:
59
+ outputs = self._forward(x)
60
+ return self.myop.cat(outputs, 1)
61
+
62
+
63
+ class QuantizableInceptionC(inception_module.InceptionC):
64
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
65
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
66
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
67
+ self.myop = nn.quantized.FloatFunctional()
68
+
69
+ def forward(self, x: Tensor) -> Tensor:
70
+ outputs = self._forward(x)
71
+ return self.myop.cat(outputs, 1)
72
+
73
+
74
+ class QuantizableInceptionD(inception_module.InceptionD):
75
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
76
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
77
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
78
+ self.myop = nn.quantized.FloatFunctional()
79
+
80
+ def forward(self, x: Tensor) -> Tensor:
81
+ outputs = self._forward(x)
82
+ return self.myop.cat(outputs, 1)
83
+
84
+
85
+ class QuantizableInceptionE(inception_module.InceptionE):
86
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
87
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
88
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
89
+ self.myop1 = nn.quantized.FloatFunctional()
90
+ self.myop2 = nn.quantized.FloatFunctional()
91
+ self.myop3 = nn.quantized.FloatFunctional()
92
+
93
+ def _forward(self, x: Tensor) -> List[Tensor]:
94
+ branch1x1 = self.branch1x1(x)
95
+
96
+ branch3x3 = self.branch3x3_1(x)
97
+ branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
98
+ branch3x3 = self.myop1.cat(branch3x3, 1)
99
+
100
+ branch3x3dbl = self.branch3x3dbl_1(x)
101
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
102
+ branch3x3dbl = [
103
+ self.branch3x3dbl_3a(branch3x3dbl),
104
+ self.branch3x3dbl_3b(branch3x3dbl),
105
+ ]
106
+ branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)
107
+
108
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
109
+ branch_pool = self.branch_pool(branch_pool)
110
+
111
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
112
+ return outputs
113
+
114
+ def forward(self, x: Tensor) -> Tensor:
115
+ outputs = self._forward(x)
116
+ return self.myop3.cat(outputs, 1)
117
+
118
+
119
+ class QuantizableInceptionAux(inception_module.InceptionAux):
120
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
121
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
122
+ super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs) # type: ignore[misc]
123
+
124
+
125
+ class QuantizableInception3(inception_module.Inception3):
126
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
127
+ super().__init__( # type: ignore[misc]
128
+ *args,
129
+ inception_blocks=[
130
+ QuantizableBasicConv2d,
131
+ QuantizableInceptionA,
132
+ QuantizableInceptionB,
133
+ QuantizableInceptionC,
134
+ QuantizableInceptionD,
135
+ QuantizableInceptionE,
136
+ QuantizableInceptionAux,
137
+ ],
138
+ **kwargs,
139
+ )
140
+ self.quant = torch.ao.quantization.QuantStub()
141
+ self.dequant = torch.ao.quantization.DeQuantStub()
142
+
143
+ def forward(self, x: Tensor) -> InceptionOutputs:
144
+ x = self._transform_input(x)
145
+ x = self.quant(x)
146
+ x, aux = self._forward(x)
147
+ x = self.dequant(x)
148
+ aux_defined = self.training and self.aux_logits
149
+ if torch.jit.is_scripting():
150
+ if not aux_defined:
151
+ warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
152
+ return InceptionOutputs(x, aux)
153
+ else:
154
+ return self.eager_outputs(x, aux)
155
+
156
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
157
+ r"""Fuse conv/bn/relu modules in inception model
158
+
159
+ Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
160
+ Model is modified in place. Note that this operation does not change numerics
161
+ and the model after modification is in floating point
162
+ """
163
+
164
+ for m in self.modules():
165
+ if type(m) is QuantizableBasicConv2d:
166
+ m.fuse_model(is_qat)
167
+
168
+
169
+ class Inception_V3_QuantizedWeights(WeightsEnum):
170
+ IMAGENET1K_FBGEMM_V1 = Weights(
171
+ url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
172
+ transforms=partial(ImageClassification, crop_size=299, resize_size=342),
173
+ meta={
174
+ "num_params": 27161264,
175
+ "min_size": (75, 75),
176
+ "categories": _IMAGENET_CATEGORIES,
177
+ "backend": "fbgemm",
178
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
179
+ "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
180
+ "_metrics": {
181
+ "ImageNet-1K": {
182
+ "acc@1": 77.176,
183
+ "acc@5": 93.354,
184
+ }
185
+ },
186
+ "_ops": 5.713,
187
+ "_file_size": 23.146,
188
+ "_docs": """
189
+ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
190
+ weights listed below.
191
+ """,
192
+ },
193
+ )
194
+ DEFAULT = IMAGENET1K_FBGEMM_V1
195
+
196
+
197
+ @register_model(name="quantized_inception_v3")
198
+ @handle_legacy_interface(
199
+ weights=(
200
+ "pretrained",
201
+ lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
202
+ if kwargs.get("quantize", False)
203
+ else Inception_V3_Weights.IMAGENET1K_V1,
204
+ )
205
+ )
206
+ def inception_v3(
207
+ *,
208
+ weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
209
+ progress: bool = True,
210
+ quantize: bool = False,
211
+ **kwargs: Any,
212
+ ) -> QuantizableInception3:
213
+ r"""Inception v3 model architecture from
214
+ `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.
215
+
216
+ .. note::
217
+ **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
218
+ N x 3 x 299 x 299, so ensure your images are sized accordingly.
219
+
220
+ .. note::
221
+ Note that ``quantize = True`` returns a quantized model with 8 bit
222
+ weights. Quantized models only support inference and run on CPUs.
223
+ GPU inference is not yet supported.
224
+
225
+ Args:
226
+ weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
227
+ weights for the model. See
228
+ :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
229
+ more details, and possible values. By default, no pre-trained
230
+ weights are used.
231
+ progress (bool, optional): If True, displays a progress bar of the download to stderr.
232
+ Default is True.
233
+ quantize (bool, optional): If True, return a quantized version of the model.
234
+ Default is False.
235
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
236
+ base class. Please refer to the `source code
237
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
238
+ for more details about this class.
239
+
240
+ .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
241
+ :members:
242
+
243
+ .. autoclass:: torchvision.models.Inception_V3_Weights
244
+ :members:
245
+ :noindex:
246
+ """
247
+ weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)
248
+
249
+ original_aux_logits = kwargs.get("aux_logits", False)
250
+ if weights is not None:
251
+ if "transform_input" not in kwargs:
252
+ _ovewrite_named_param(kwargs, "transform_input", True)
253
+ _ovewrite_named_param(kwargs, "aux_logits", True)
254
+ _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
255
+ if "backend" in weights.meta:
256
+ _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
257
+ backend = kwargs.pop("backend", "fbgemm")
258
+
259
+ model = QuantizableInception3(**kwargs)
260
+ _replace_relu(model)
261
+ if quantize:
262
+ quantize_model(model, backend)
263
+
264
+ if weights is not None:
265
+ if quantize and not original_aux_logits:
266
+ model.aux_logits = False
267
+ model.AuxLogits = None
268
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
269
+ if not quantize and not original_aux_logits:
270
+ model.aux_logits = False
271
+ model.AuxLogits = None
272
+
273
+ return model
pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenet.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .mobilenetv2 import * # noqa: F401, F403
2
+ from .mobilenetv3 import * # noqa: F401, F403
3
+ from .mobilenetv2 import __all__ as mv2_all
4
+ from .mobilenetv3 import __all__ as mv3_all
5
+
6
+ __all__ = mv2_all + mv3_all
pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv2.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Optional, Union
3
+
4
+ from torch import nn, Tensor
5
+ from torch.ao.quantization import DeQuantStub, QuantStub
6
+ from torchvision.models.mobilenetv2 import InvertedResidual, MobileNet_V2_Weights, MobileNetV2
7
+
8
+ from ...ops.misc import Conv2dNormActivation
9
+ from ...transforms._presets import ImageClassification
10
+ from .._api import register_model, Weights, WeightsEnum
11
+ from .._meta import _IMAGENET_CATEGORIES
12
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from .utils import _fuse_modules, _replace_relu, quantize_model
14
+
15
+
16
+ __all__ = [
17
+ "QuantizableMobileNetV2",
18
+ "MobileNet_V2_QuantizedWeights",
19
+ "mobilenet_v2",
20
+ ]
21
+
22
+
23
+ class QuantizableInvertedResidual(InvertedResidual):
24
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
25
+ super().__init__(*args, **kwargs)
26
+ self.skip_add = nn.quantized.FloatFunctional()
27
+
28
+ def forward(self, x: Tensor) -> Tensor:
29
+ if self.use_res_connect:
30
+ return self.skip_add.add(x, self.conv(x))
31
+ else:
32
+ return self.conv(x)
33
+
34
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
35
+ for idx in range(len(self.conv)):
36
+ if type(self.conv[idx]) is nn.Conv2d:
37
+ _fuse_modules(self.conv, [str(idx), str(idx + 1)], is_qat, inplace=True)
38
+
39
+
40
+ class QuantizableMobileNetV2(MobileNetV2):
41
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
42
+ """
43
+ MobileNet V2 main class
44
+
45
+ Args:
46
+ Inherits args from floating point MobileNetV2
47
+ """
48
+ super().__init__(*args, **kwargs)
49
+ self.quant = QuantStub()
50
+ self.dequant = DeQuantStub()
51
+
52
+ def forward(self, x: Tensor) -> Tensor:
53
+ x = self.quant(x)
54
+ x = self._forward_impl(x)
55
+ x = self.dequant(x)
56
+ return x
57
+
58
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
59
+ for m in self.modules():
60
+ if type(m) is Conv2dNormActivation:
61
+ _fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
62
+ if type(m) is QuantizableInvertedResidual:
63
+ m.fuse_model(is_qat)
64
+
65
+
66
+ class MobileNet_V2_QuantizedWeights(WeightsEnum):
67
+ IMAGENET1K_QNNPACK_V1 = Weights(
68
+ url="https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
69
+ transforms=partial(ImageClassification, crop_size=224),
70
+ meta={
71
+ "num_params": 3504872,
72
+ "min_size": (1, 1),
73
+ "categories": _IMAGENET_CATEGORIES,
74
+ "backend": "qnnpack",
75
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
76
+ "unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
77
+ "_metrics": {
78
+ "ImageNet-1K": {
79
+ "acc@1": 71.658,
80
+ "acc@5": 90.150,
81
+ }
82
+ },
83
+ "_ops": 0.301,
84
+ "_file_size": 3.423,
85
+ "_docs": """
86
+ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
87
+ weights listed below.
88
+ """,
89
+ },
90
+ )
91
+ DEFAULT = IMAGENET1K_QNNPACK_V1
92
+
93
+
94
+ @register_model(name="quantized_mobilenet_v2")
95
+ @handle_legacy_interface(
96
+ weights=(
97
+ "pretrained",
98
+ lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
99
+ if kwargs.get("quantize", False)
100
+ else MobileNet_V2_Weights.IMAGENET1K_V1,
101
+ )
102
+ )
103
+ def mobilenet_v2(
104
+ *,
105
+ weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
106
+ progress: bool = True,
107
+ quantize: bool = False,
108
+ **kwargs: Any,
109
+ ) -> QuantizableMobileNetV2:
110
+ """
111
+ Constructs a MobileNetV2 architecture from
112
+ `MobileNetV2: Inverted Residuals and Linear Bottlenecks
113
+ <https://arxiv.org/abs/1801.04381>`_.
114
+
115
+ .. note::
116
+ Note that ``quantize = True`` returns a quantized model with 8 bit
117
+ weights. Quantized models only support inference and run on CPUs.
118
+ GPU inference is not yet supported.
119
+
120
+ Args:
121
+ weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
122
+ pretrained weights for the model. See
123
+ :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
124
+ more details, and possible values. By default, no pre-trained
125
+ weights are used.
126
+ progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
127
+ quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
128
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
129
+ base class. Please refer to the `source code
130
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
131
+ for more details about this class.
132
+ .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
133
+ :members:
134
+ .. autoclass:: torchvision.models.MobileNet_V2_Weights
135
+ :members:
136
+ :noindex:
137
+ """
138
+ weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)
139
+
140
+ if weights is not None:
141
+ _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
142
+ if "backend" in weights.meta:
143
+ _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
144
+ backend = kwargs.pop("backend", "qnnpack")
145
+
146
+ model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
147
+ _replace_relu(model)
148
+ if quantize:
149
+ quantize_model(model, backend)
150
+
151
+ if weights is not None:
152
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
153
+
154
+ return model
pllava/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv3.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, List, Optional, Union
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+ from torch.ao.quantization import DeQuantStub, QuantStub
7
+
8
+ from ...ops.misc import Conv2dNormActivation, SqueezeExcitation
9
+ from ...transforms._presets import ImageClassification
10
+ from .._api import register_model, Weights, WeightsEnum
11
+ from .._meta import _IMAGENET_CATEGORIES
12
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from ..mobilenetv3 import (
14
+ _mobilenet_v3_conf,
15
+ InvertedResidual,
16
+ InvertedResidualConfig,
17
+ MobileNet_V3_Large_Weights,
18
+ MobileNetV3,
19
+ )
20
+ from .utils import _fuse_modules, _replace_relu
21
+
22
+
23
+ __all__ = [
24
+ "QuantizableMobileNetV3",
25
+ "MobileNet_V3_Large_QuantizedWeights",
26
+ "mobilenet_v3_large",
27
+ ]
28
+
29
+
30
+ class QuantizableSqueezeExcitation(SqueezeExcitation):
31
+ _version = 2
32
+
33
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
34
+ kwargs["scale_activation"] = nn.Hardsigmoid
35
+ super().__init__(*args, **kwargs)
36
+ self.skip_mul = nn.quantized.FloatFunctional()
37
+
38
+ def forward(self, input: Tensor) -> Tensor:
39
+ return self.skip_mul.mul(self._scale(input), input)
40
+
41
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
42
+ _fuse_modules(self, ["fc1", "activation"], is_qat, inplace=True)
43
+
44
+ def _load_from_state_dict(
45
+ self,
46
+ state_dict,
47
+ prefix,
48
+ local_metadata,
49
+ strict,
50
+ missing_keys,
51
+ unexpected_keys,
52
+ error_msgs,
53
+ ):
54
+ version = local_metadata.get("version", None)
55
+
56
+ if hasattr(self, "qconfig") and (version is None or version < 2):
57
+ default_state_dict = {
58
+ "scale_activation.activation_post_process.scale": torch.tensor([1.0]),
59
+ "scale_activation.activation_post_process.activation_post_process.scale": torch.tensor([1.0]),
60
+ "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
61
+ "scale_activation.activation_post_process.activation_post_process.zero_point": torch.tensor(
62
+ [0], dtype=torch.int32
63
+ ),
64
+ "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
65
+ "scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
66
+ }
67
+ for k, v in default_state_dict.items():
68
+ full_key = prefix + k
69
+ if full_key not in state_dict:
70
+ state_dict[full_key] = v
71
+
72
+ super()._load_from_state_dict(
73
+ state_dict,
74
+ prefix,
75
+ local_metadata,
76
+ strict,
77
+ missing_keys,
78
+ unexpected_keys,
79
+ error_msgs,
80
+ )
81
+
82
+
83
+ class QuantizableInvertedResidual(InvertedResidual):
84
+ # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
85
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
86
+ super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs) # type: ignore[misc]
87
+ self.skip_add = nn.quantized.FloatFunctional()
88
+
89
+ def forward(self, x: Tensor) -> Tensor:
90
+ if self.use_res_connect:
91
+ return self.skip_add.add(x, self.block(x))
92
+ else:
93
+ return self.block(x)
94
+
95
+
96
+ class QuantizableMobileNetV3(MobileNetV3):
97
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
98
+ """
99
+ MobileNet V3 main class
100
+
101
+ Args:
102
+ Inherits args from floating point MobileNetV3
103
+ """
104
+ super().__init__(*args, **kwargs)
105
+ self.quant = QuantStub()
106
+ self.dequant = DeQuantStub()
107
+
108
+ def forward(self, x: Tensor) -> Tensor:
109
+ x = self.quant(x)
110
+ x = self._forward_impl(x)
111
+ x = self.dequant(x)
112
+ return x
113
+
114
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
115
+ for m in self.modules():
116
+ if type(m) is Conv2dNormActivation:
117
+ modules_to_fuse = ["0", "1"]
118
+ if len(m) == 3 and type(m[2]) is nn.ReLU:
119
+ modules_to_fuse.append("2")
120
+ _fuse_modules(m, modules_to_fuse, is_qat, inplace=True)
121
+ elif type(m) is QuantizableSqueezeExcitation:
122
+ m.fuse_model(is_qat)
123
+
124
+
125
+ def _mobilenet_v3_model(
126
+ inverted_residual_setting: List[InvertedResidualConfig],
127
+ last_channel: int,
128
+ weights: Optional[WeightsEnum],
129
+ progress: bool,
130
+ quantize: bool,
131
+ **kwargs: Any,
132
+ ) -> QuantizableMobileNetV3:
133
+ if weights is not None:
134
+ _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
135
+ if "backend" in weights.meta:
136
+ _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
137
+ backend = kwargs.pop("backend", "qnnpack")
138
+
139
+ model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
140
+ _replace_relu(model)
141
+
142
+ if quantize:
143
+ # Instead of quantizing the model and then loading the quantized weights we take a different approach.
144
+ # We prepare the QAT model, load the QAT weights from training and then convert it.
145
+ # This is done to avoid extremely low accuracies observed on the specific model. This is rather a workaround
146
+ # for an unresolved bug on the eager quantization API detailed at: https://github.com/pytorch/vision/issues/5890
147
+ model.fuse_model(is_qat=True)
148
+ model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
149
+ torch.ao.quantization.prepare_qat(model, inplace=True)
150
+
151
+ if weights is not None:
152
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
153
+
154
+ if quantize:
155
+ torch.ao.quantization.convert(model, inplace=True)
156
+ model.eval()
157
+
158
+ return model
159
+
160
+
161
+ class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
162
+ IMAGENET1K_QNNPACK_V1 = Weights(
163
+ url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
164
+ transforms=partial(ImageClassification, crop_size=224),
165
+ meta={
166
+ "num_params": 5483032,
167
+ "min_size": (1, 1),
168
+ "categories": _IMAGENET_CATEGORIES,
169
+ "backend": "qnnpack",
170
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
171
+ "unquantized": MobileNet_V3_Large_Weights.IMAGENET1K_V1,
172
+ "_metrics": {
173
+ "ImageNet-1K": {
174
+ "acc@1": 73.004,
175
+ "acc@5": 90.858,
176
+ }
177
+ },
178
+ "_ops": 0.217,
179
+ "_file_size": 21.554,
180
+ "_docs": """
181
+ These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
182
+ weights listed below.
183
+ """,
184
+ },
185
+ )
186
+ DEFAULT = IMAGENET1K_QNNPACK_V1
187
+
188
+
189
+ @register_model(name="quantized_mobilenet_v3_large")
190
+ @handle_legacy_interface(
191
+ weights=(
192
+ "pretrained",
193
+ lambda kwargs: MobileNet_V3_Large_QuantizedWeights.IMAGENET1K_QNNPACK_V1
194
+ if kwargs.get("quantize", False)
195
+ else MobileNet_V3_Large_Weights.IMAGENET1K_V1,
196
+ )
197
+ )
198
+ def mobilenet_v3_large(
199
+ *,
200
+ weights: Optional[Union[MobileNet_V3_Large_QuantizedWeights, MobileNet_V3_Large_Weights]] = None,
201
+ progress: bool = True,
202
+ quantize: bool = False,
203
+ **kwargs: Any,
204
+ ) -> QuantizableMobileNetV3:
205
+ """
206
+ MobileNetV3 (Large) model from
207
+ `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
208
+
209
+ .. note::
210
+ Note that ``quantize = True`` returns a quantized model with 8 bit
211
+ weights. Quantized models only support inference and run on CPUs.
212
+ GPU inference is not yet supported.
213
+
214
+ Args:
215
+ weights (:class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
216
+ pretrained weights for the model. See
217
+ :class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` below for
218
+ more details, and possible values. By default, no pre-trained
219
+ weights are used.
220
+ progress (bool): If True, displays a progress bar of the
221
+ download to stderr. Default is True.
222
+ quantize (bool): If True, return a quantized version of the model. Default is False.
223
+ **kwargs: parameters passed to the ``torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights``
224
+ base class. Please refer to the `source code
225
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv3.py>`_
226
+ for more details about this class.
227
+
228
+ .. autoclass:: torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights
229
+ :members:
230
+ .. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
231
+ :members:
232
+ :noindex:
233
+ """
234
+ weights = (MobileNet_V3_Large_QuantizedWeights if quantize else MobileNet_V3_Large_Weights).verify(weights)
235
+
236
+ inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
237
+ return _mobilenet_v3_model(inverted_residual_setting, last_channel, weights, progress, quantize, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, List, Optional, Type, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import Tensor
7
+ from torchvision.models.resnet import (
8
+ BasicBlock,
9
+ Bottleneck,
10
+ ResNet,
11
+ ResNet18_Weights,
12
+ ResNet50_Weights,
13
+ ResNeXt101_32X8D_Weights,
14
+ ResNeXt101_64X4D_Weights,
15
+ )
16
+
17
+ from ...transforms._presets import ImageClassification
18
+ from .._api import register_model, Weights, WeightsEnum
19
+ from .._meta import _IMAGENET_CATEGORIES
20
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
21
+ from .utils import _fuse_modules, _replace_relu, quantize_model
22
+
23
+
24
+ __all__ = [
25
+ "QuantizableResNet",
26
+ "ResNet18_QuantizedWeights",
27
+ "ResNet50_QuantizedWeights",
28
+ "ResNeXt101_32X8D_QuantizedWeights",
29
+ "ResNeXt101_64X4D_QuantizedWeights",
30
+ "resnet18",
31
+ "resnet50",
32
+ "resnext101_32x8d",
33
+ "resnext101_64x4d",
34
+ ]
35
+
36
+
37
+ class QuantizableBasicBlock(BasicBlock):
38
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
39
+ super().__init__(*args, **kwargs)
40
+ self.add_relu = torch.nn.quantized.FloatFunctional()
41
+
42
+ def forward(self, x: Tensor) -> Tensor:
43
+ identity = x
44
+
45
+ out = self.conv1(x)
46
+ out = self.bn1(out)
47
+ out = self.relu(out)
48
+
49
+ out = self.conv2(out)
50
+ out = self.bn2(out)
51
+
52
+ if self.downsample is not None:
53
+ identity = self.downsample(x)
54
+
55
+ out = self.add_relu.add_relu(out, identity)
56
+
57
+ return out
58
+
59
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
60
+ _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
61
+ if self.downsample:
62
+ _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
63
+
64
+
65
+ class QuantizableBottleneck(Bottleneck):
66
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
67
+ super().__init__(*args, **kwargs)
68
+ self.skip_add_relu = nn.quantized.FloatFunctional()
69
+ self.relu1 = nn.ReLU(inplace=False)
70
+ self.relu2 = nn.ReLU(inplace=False)
71
+
72
+ def forward(self, x: Tensor) -> Tensor:
73
+ identity = x
74
+ out = self.conv1(x)
75
+ out = self.bn1(out)
76
+ out = self.relu1(out)
77
+ out = self.conv2(out)
78
+ out = self.bn2(out)
79
+ out = self.relu2(out)
80
+
81
+ out = self.conv3(out)
82
+ out = self.bn3(out)
83
+
84
+ if self.downsample is not None:
85
+ identity = self.downsample(x)
86
+ out = self.skip_add_relu.add_relu(out, identity)
87
+
88
+ return out
89
+
90
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
91
+ _fuse_modules(
92
+ self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
93
+ )
94
+ if self.downsample:
95
+ _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
96
+
97
+
98
+ class QuantizableResNet(ResNet):
99
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
100
+ super().__init__(*args, **kwargs)
101
+
102
+ self.quant = torch.ao.quantization.QuantStub()
103
+ self.dequant = torch.ao.quantization.DeQuantStub()
104
+
105
+ def forward(self, x: Tensor) -> Tensor:
106
+ x = self.quant(x)
107
+ # Ensure scriptability
108
+ # super(QuantizableResNet,self).forward(x)
109
+ # is not scriptable
110
+ x = self._forward_impl(x)
111
+ x = self.dequant(x)
112
+ return x
113
+
114
+ def fuse_model(self, is_qat: Optional[bool] = None) -> None:
115
+ r"""Fuse conv/bn/relu modules in resnet models
116
+
117
+ Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
118
+ Model is modified in place. Note that this operation does not change numerics
119
+ and the model after modification is in floating point
120
+ """
121
+ _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
122
+ for m in self.modules():
123
+ if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
124
+ m.fuse_model(is_qat)
125
+
126
+
127
+ def _resnet(
128
+ block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
129
+ layers: List[int],
130
+ weights: Optional[WeightsEnum],
131
+ progress: bool,
132
+ quantize: bool,
133
+ **kwargs: Any,
134
+ ) -> QuantizableResNet:
135
+ if weights is not None:
136
+ _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
137
+ if "backend" in weights.meta:
138
+ _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
139
+ backend = kwargs.pop("backend", "fbgemm")
140
+
141
+ model = QuantizableResNet(block, layers, **kwargs)
142
+ _replace_relu(model)
143
+ if quantize:
144
+ quantize_model(model, backend)
145
+
146
+ if weights is not None:
147
+ model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
148
+
149
+ return model
150
+
151
+
152
+ _COMMON_META = {
153
+ "min_size": (1, 1),
154
+ "categories": _IMAGENET_CATEGORIES,
155
+ "backend": "fbgemm",
156
+ "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
157
+ "_docs": """
158
+ These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
159
+ weights listed below.
160
+ """,
161
+ }
162
+
163
+
164
+ class ResNet18_QuantizedWeights(WeightsEnum):
165
+ IMAGENET1K_FBGEMM_V1 = Weights(
166
+ url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
167
+ transforms=partial(ImageClassification, crop_size=224),
168
+ meta={
169
+ **_COMMON_META,
170
+ "num_params": 11689512,
171
+ "unquantized": ResNet18_Weights.IMAGENET1K_V1,
172
+ "_metrics": {
173
+ "ImageNet-1K": {
174
+ "acc@1": 69.494,
175
+ "acc@5": 88.882,
176
+ }
177
+ },
178
+ "_ops": 1.814,
179
+ "_file_size": 11.238,
180
+ },
181
+ )
182
+ DEFAULT = IMAGENET1K_FBGEMM_V1
183
+
184
+
185
+ class ResNet50_QuantizedWeights(WeightsEnum):
186
+ IMAGENET1K_FBGEMM_V1 = Weights(
187
+ url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
188
+ transforms=partial(ImageClassification, crop_size=224),
189
+ meta={
190
+ **_COMMON_META,
191
+ "num_params": 25557032,
192
+ "unquantized": ResNet50_Weights.IMAGENET1K_V1,
193
+ "_metrics": {
194
+ "ImageNet-1K": {
195
+ "acc@1": 75.920,
196
+ "acc@5": 92.814,
197
+ }
198
+ },
199
+ "_ops": 4.089,
200
+ "_file_size": 24.759,
201
+ },
202
+ )
203
+ IMAGENET1K_FBGEMM_V2 = Weights(
204
+ url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
205
+ transforms=partial(ImageClassification, crop_size=224, resize_size=232),
206
+ meta={
207
+ **_COMMON_META,
208
+ "num_params": 25557032,
209
+ "unquantized": ResNet50_Weights.IMAGENET1K_V2,
210
+ "_metrics": {
211
+ "ImageNet-1K": {
212
+ "acc@1": 80.282,
213
+ "acc@5": 94.976,
214
+ }
215
+ },
216
+ "_ops": 4.089,
217
+ "_file_size": 24.953,
218
+ },
219
+ )
220
+ DEFAULT = IMAGENET1K_FBGEMM_V2
221
+
222
+
223
+ class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
224
+ IMAGENET1K_FBGEMM_V1 = Weights(
225
+ url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
226
+ transforms=partial(ImageClassification, crop_size=224),
227
+ meta={
228
+ **_COMMON_META,
229
+ "num_params": 88791336,
230
+ "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
231
+ "_metrics": {
232
+ "ImageNet-1K": {
233
+ "acc@1": 78.986,
234
+ "acc@5": 94.480,
235
+ }
236
+ },
237
+ "_ops": 16.414,
238
+ "_file_size": 86.034,
239
+ },
240
+ )
241
+ IMAGENET1K_FBGEMM_V2 = Weights(
242
+ url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
243
+ transforms=partial(ImageClassification, crop_size=224, resize_size=232),
244
+ meta={
245
+ **_COMMON_META,
246
+ "num_params": 88791336,
247
+ "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
248
+ "_metrics": {
249
+ "ImageNet-1K": {
250
+ "acc@1": 82.574,
251
+ "acc@5": 96.132,
252
+ }
253
+ },
254
+ "_ops": 16.414,
255
+ "_file_size": 86.645,
256
+ },
257
+ )
258
+ DEFAULT = IMAGENET1K_FBGEMM_V2
259
+
260
+
261
+ class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
262
+ IMAGENET1K_FBGEMM_V1 = Weights(
263
+ url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
264
+ transforms=partial(ImageClassification, crop_size=224, resize_size=232),
265
+ meta={
266
+ **_COMMON_META,
267
+ "num_params": 83455272,
268
+ "recipe": "https://github.com/pytorch/vision/pull/5935",
269
+ "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
270
+ "_metrics": {
271
+ "ImageNet-1K": {
272
+ "acc@1": 82.898,
273
+ "acc@5": 96.326,
274
+ }
275
+ },
276
+ "_ops": 15.46,
277
+ "_file_size": 81.556,
278
+ },
279
+ )
280
+ DEFAULT = IMAGENET1K_FBGEMM_V1
281
+
282
+
283
+ @register_model(name="quantized_resnet18")
284
+ @handle_legacy_interface(
285
+ weights=(
286
+ "pretrained",
287
+ lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
288
+ if kwargs.get("quantize", False)
289
+ else ResNet18_Weights.IMAGENET1K_V1,
290
+ )
291
+ )
292
+ def resnet18(
293
+ *,
294
+ weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
295
+ progress: bool = True,
296
+ quantize: bool = False,
297
+ **kwargs: Any,
298
+ ) -> QuantizableResNet:
299
+ """ResNet-18 model from
300
+ `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
301
+
302
+ .. note::
303
+ Note that ``quantize = True`` returns a quantized model with 8 bit
304
+ weights. Quantized models only support inference and run on CPUs.
305
+ GPU inference is not yet supported.
306
+
307
+ Args:
308
+ weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
309
+ pretrained weights for the model. See
310
+ :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
311
+ more details, and possible values. By default, no pre-trained
312
+ weights are used.
313
+ progress (bool, optional): If True, displays a progress bar of the
314
+ download to stderr. Default is True.
315
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
316
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
317
+ base class. Please refer to the `source code
318
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
319
+ for more details about this class.
320
+
321
+ .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
322
+ :members:
323
+
324
+ .. autoclass:: torchvision.models.ResNet18_Weights
325
+ :members:
326
+ :noindex:
327
+ """
328
+ weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)
329
+
330
+ return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
331
+
332
+
333
+ @register_model(name="quantized_resnet50")
334
+ @handle_legacy_interface(
335
+ weights=(
336
+ "pretrained",
337
+ lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
338
+ if kwargs.get("quantize", False)
339
+ else ResNet50_Weights.IMAGENET1K_V1,
340
+ )
341
+ )
342
+ def resnet50(
343
+ *,
344
+ weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
345
+ progress: bool = True,
346
+ quantize: bool = False,
347
+ **kwargs: Any,
348
+ ) -> QuantizableResNet:
349
+ """ResNet-50 model from
350
+ `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
351
+
352
+ .. note::
353
+ Note that ``quantize = True`` returns a quantized model with 8 bit
354
+ weights. Quantized models only support inference and run on CPUs.
355
+ GPU inference is not yet supported.
356
+
357
+ Args:
358
+ weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
359
+ pretrained weights for the model. See
360
+ :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
361
+ more details, and possible values. By default, no pre-trained
362
+ weights are used.
363
+ progress (bool, optional): If True, displays a progress bar of the
364
+ download to stderr. Default is True.
365
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
366
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
367
+ base class. Please refer to the `source code
368
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
369
+ for more details about this class.
370
+
371
+ .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
372
+ :members:
373
+
374
+ .. autoclass:: torchvision.models.ResNet50_Weights
375
+ :members:
376
+ :noindex:
377
+ """
378
+ weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)
379
+
380
+ return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
381
+
382
+
383
+ @register_model(name="quantized_resnext101_32x8d")
384
+ @handle_legacy_interface(
385
+ weights=(
386
+ "pretrained",
387
+ lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
388
+ if kwargs.get("quantize", False)
389
+ else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
390
+ )
391
+ )
392
+ def resnext101_32x8d(
393
+ *,
394
+ weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
395
+ progress: bool = True,
396
+ quantize: bool = False,
397
+ **kwargs: Any,
398
+ ) -> QuantizableResNet:
399
+ """ResNeXt-101 32x8d model from
400
+ `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
401
+
402
+ .. note::
403
+ Note that ``quantize = True`` returns a quantized model with 8 bit
404
+ weights. Quantized models only support inference and run on CPUs.
405
+ GPU inference is not yet supported.
406
+
407
+ Args:
408
+ weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
409
+ pretrained weights for the model. See
410
+ :class:`~torchvision.models.quantization.ResNet101_32X8D_QuantizedWeights` below for
411
+ more details, and possible values. By default, no pre-trained
412
+ weights are used.
413
+ progress (bool, optional): If True, displays a progress bar of the
414
+ download to stderr. Default is True.
415
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
416
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
417
+ base class. Please refer to the `source code
418
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
419
+ for more details about this class.
420
+
421
+ .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
422
+ :members:
423
+
424
+ .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
425
+ :members:
426
+ :noindex:
427
+ """
428
+ weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)
429
+
430
+ _ovewrite_named_param(kwargs, "groups", 32)
431
+ _ovewrite_named_param(kwargs, "width_per_group", 8)
432
+ return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
433
+
434
+
435
+ @register_model(name="quantized_resnext101_64x4d")
436
+ @handle_legacy_interface(
437
+ weights=(
438
+ "pretrained",
439
+ lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
440
+ if kwargs.get("quantize", False)
441
+ else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
442
+ )
443
+ )
444
+ def resnext101_64x4d(
445
+ *,
446
+ weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
447
+ progress: bool = True,
448
+ quantize: bool = False,
449
+ **kwargs: Any,
450
+ ) -> QuantizableResNet:
451
+ """ResNeXt-101 64x4d model from
452
+ `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
453
+
454
+ .. note::
455
+ Note that ``quantize = True`` returns a quantized model with 8 bit
456
+ weights. Quantized models only support inference and run on CPUs.
457
+ GPU inference is not yet supported.
458
+
459
+ Args:
460
+ weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
461
+ pretrained weights for the model. See
462
+ :class:`~torchvision.models.quantization.ResNet101_64X4D_QuantizedWeights` below for
463
+ more details, and possible values. By default, no pre-trained
464
+ weights are used.
465
+ progress (bool, optional): If True, displays a progress bar of the
466
+ download to stderr. Default is True.
467
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
468
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
469
+ base class. Please refer to the `source code
470
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
471
+ for more details about this class.
472
+
473
+ .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
474
+ :members:
475
+
476
+ .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
477
+ :members:
478
+ :noindex:
479
+ """
480
+ weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
481
+
482
+ _ovewrite_named_param(kwargs, "groups", 64)
483
+ _ovewrite_named_param(kwargs, "width_per_group", 4)
484
+ return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/quantization/shufflenetv2.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, List, Optional, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import Tensor
7
+ from torchvision.models import shufflenetv2
8
+
9
+ from ...transforms._presets import ImageClassification
10
+ from .._api import register_model, Weights, WeightsEnum
11
+ from .._meta import _IMAGENET_CATEGORIES
12
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from ..shufflenetv2 import (
14
+ ShuffleNet_V2_X0_5_Weights,
15
+ ShuffleNet_V2_X1_0_Weights,
16
+ ShuffleNet_V2_X1_5_Weights,
17
+ ShuffleNet_V2_X2_0_Weights,
18
+ )
19
+ from .utils import _fuse_modules, _replace_relu, quantize_model
20
+
21
+
22
+ __all__ = [
23
+ "QuantizableShuffleNetV2",
24
+ "ShuffleNet_V2_X0_5_QuantizedWeights",
25
+ "ShuffleNet_V2_X1_0_QuantizedWeights",
26
+ "ShuffleNet_V2_X1_5_QuantizedWeights",
27
+ "ShuffleNet_V2_X2_0_QuantizedWeights",
28
+ "shufflenet_v2_x0_5",
29
+ "shufflenet_v2_x1_0",
30
+ "shufflenet_v2_x1_5",
31
+ "shufflenet_v2_x2_0",
32
+ ]
33
+
34
+
35
class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
    """Quantization-ready variant of the ShuffleNetV2 inverted residual.

    The plain ``torch.cat`` of the float block is routed through a
    ``FloatFunctional`` so the concatenation can be observed and converted
    during eager-mode quantization.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Stateful functional wrapper required for quantized concatenation.
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            # Stride-1 block: split channels, transform only the second half.
            left, right = x.chunk(2, dim=1)
            merged = self.cat.cat([left, self.branch2(right)], dim=1)
        else:
            # Down-sampling block: both branches process the full input.
            merged = self.cat.cat([self.branch1(x), self.branch2(x)], dim=1)
        return shufflenetv2.channel_shuffle(merged, 2)
50
+
51
+
52
class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
    """ShuffleNetV2 wrapped with quant/dequant stubs for eager-mode quantization."""

    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Force the quantization-friendly residual block so concatenations
        # go through FloatFunctional and can be observed.
        super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs)  # type: ignore[misc]
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        # Quantize the input, run the float-authored graph, dequantize the output.
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in shufflenetv2 model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.

        .. note::
            Note that this operation does not change numerics
            and the model after modification is in floating point
        """
        # conv1 and conv5 are Conv-BN-ReLU stacks whose children are named "0","1","2".
        for name, m in self._modules.items():
            if name in ["conv1", "conv5"] and m is not None:
                _fuse_modules(m, [["0", "1", "2"]], is_qat, inplace=True)
        for m in self.modules():
            if type(m) is QuantizableInvertedResidual:
                # branch1 is empty for stride-1 blocks; fuse it only when populated.
                if len(m.branch1._modules.items()) > 0:
                    _fuse_modules(m.branch1, [["0", "1"], ["2", "3", "4"]], is_qat, inplace=True)
                _fuse_modules(
                    m.branch2,
                    [["0", "1", "2"], ["3", "4"], ["5", "6", "7"]],
                    is_qat,
                    inplace=True,
                )
88
+
89
+
90
def _shufflenetv2(
    stages_repeats: List[int],
    stages_out_channels: List[int],
    *,
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """Build a (possibly quantized) ShuffleNetV2 and optionally load weights.

    When ``weights`` is given, its metadata overrides ``num_classes`` and the
    quantization ``backend`` so the architecture matches the checkpoint.
    """
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    # "backend" is consumed here; it is not a ShuffleNetV2 constructor argument.
    backend = kwargs.pop("backend", "fbgemm")

    net = QuantizableShuffleNetV2(stages_repeats, stages_out_channels, **kwargs)
    _replace_relu(net)
    if quantize:
        quantize_model(net, backend)

    if weights is not None:
        net.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return net
114
+
115
+
116
# Metadata shared by every quantized ShuffleNetV2 weight entry defined below.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "backend": "fbgemm",
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
    "_docs": """
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    """,
}
126
+
127
+
128
class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
    # fbgemm post-training-quantized counterpart of ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 1366792,
            "unquantized": ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 57.972,
                    "acc@5": 79.780,
                }
            },
            "_ops": 0.04,
            "_file_size": 1.501,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
147
+
148
+
149
class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
    # fbgemm post-training-quantized counterpart of ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-1e62bb32.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2278604,
            "unquantized": ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 68.360,
                    "acc@5": 87.582,
                }
            },
            "_ops": 0.145,
            "_file_size": 2.334,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
168
+
169
+
170
class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum):
    # fbgemm post-training-quantized counterpart of ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1.
    # Note the variant-specific recipe and resize_size=232 override.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_5_fbgemm-d7401f05.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 3503624,
            "unquantized": ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.052,
                    "acc@5": 90.700,
                }
            },
            "_ops": 0.296,
            "_file_size": 3.672,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
190
+
191
+
192
class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum):
    # fbgemm post-training-quantized counterpart of ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1.
    # Note the variant-specific recipe and resize_size=232 override.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x2_0_fbgemm-5cac526c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 7393996,
            "unquantized": ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.354,
                    "acc@5": 92.488,
                }
            },
            "_ops": 0.583,
            "_file_size": 7.467,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
212
+
213
+
214
@register_model(name="quantized_shufflenet_v2_x0_5")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X0_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x0_5(
    *,
    weights: Optional[Union[ShuffleNet_V2_X0_5_QuantizedWeights, ShuffleNet_V2_X0_5_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float weight enum depending on the mode.
    weights = (ShuffleNet_V2_X0_5_QuantizedWeights if quantize else ShuffleNet_V2_X0_5_Weights).verify(weights)
    # Stage repeat counts and per-stage output channels for the 0.5x variant.
    return _shufflenetv2(
        [4, 8, 4], [24, 48, 96, 192, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
266
+
267
+
268
@register_model(name="quantized_shufflenet_v2_x1_0")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X1_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x1_0(
    *,
    weights: Optional[Union[ShuffleNet_V2_X1_0_QuantizedWeights, ShuffleNet_V2_X1_0_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float weight enum depending on the mode.
    weights = (ShuffleNet_V2_X1_0_QuantizedWeights if quantize else ShuffleNet_V2_X1_0_Weights).verify(weights)
    # Stage repeat counts and per-stage output channels for the 1.0x variant.
    return _shufflenetv2(
        [4, 8, 4], [24, 116, 232, 464, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
320
+
321
+
322
@register_model(name="quantized_shufflenet_v2_x1_5")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X1_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x1_5(
    *,
    weights: Optional[Union[ShuffleNet_V2_X1_5_QuantizedWeights, ShuffleNet_V2_X1_5_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float weight enum depending on the mode.
    weights = (ShuffleNet_V2_X1_5_QuantizedWeights if quantize else ShuffleNet_V2_X1_5_Weights).verify(weights)
    # Stage repeat counts and per-stage output channels for the 1.5x variant.
    return _shufflenetv2(
        [4, 8, 4], [24, 176, 352, 704, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
374
+
375
+
376
@register_model(name="quantized_shufflenet_v2_x2_0")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X2_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x2_0(
    *,
    weights: Optional[Union[ShuffleNet_V2_X2_0_QuantizedWeights, ShuffleNet_V2_X2_0_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
        :noindex:
    """
    # Validate against the quantized or float weight enum depending on the mode.
    weights = (ShuffleNet_V2_X2_0_QuantizedWeights if quantize else ShuffleNet_V2_X2_0_Weights).verify(weights)
    # Stage repeat counts and per-stage output channels for the 2.0x variant.
    return _shufflenetv2(
        [4, 8, 4], [24, 244, 488, 976, 2048], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
pllava/lib/python3.10/site-packages/torchvision/models/quantization/utils.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Optional, Union
2
+
3
+ import torch
4
+ from torch import nn
5
+
6
+
7
+ def _replace_relu(module: nn.Module) -> None:
8
+ reassign = {}
9
+ for name, mod in module.named_children():
10
+ _replace_relu(mod)
11
+ # Checking for explicit type instead of instance
12
+ # as we only want to replace modules of the exact type
13
+ # not inherited classes
14
+ if type(mod) is nn.ReLU or type(mod) is nn.ReLU6:
15
+ reassign[name] = nn.ReLU(inplace=False)
16
+
17
+ for key, value in reassign.items():
18
+ module._modules[key] = value
19
+
20
+
21
def quantize_model(model: nn.Module, backend: str) -> None:
    """Post-training-quantize ``model`` in place using the given engine.

    The model is fused, instrumented with observers, calibrated on a single
    dummy batch and finally converted to a quantized model.

    Args:
        model (nn.Module): float model to quantize; must expose ``fuse_model()``.
        backend (str): quantization engine, e.g. ``"fbgemm"`` or ``"qnnpack"``.

    Raises:
        RuntimeError: if ``backend`` is not a supported quantized engine.
    """
    # Single dummy batch used only to calibrate the activation observers.
    _dummy_input_data = torch.rand(1, 3, 299, 299)
    if backend not in torch.backends.quantized.supported_engines:
        raise RuntimeError("Quantized backend not supported ")
    torch.backends.quantized.engine = backend
    model.eval()
    # Make sure that weight qconfig matches that of the serialized models
    if backend == "fbgemm":
        # fbgemm checkpoints use per-channel weight observers.
        model.qconfig = torch.ao.quantization.QConfig(  # type: ignore[assignment]
            activation=torch.ao.quantization.default_observer,
            weight=torch.ao.quantization.default_per_channel_weight_observer,
        )
    elif backend == "qnnpack":
        # qnnpack checkpoints use per-tensor weight observers.
        model.qconfig = torch.ao.quantization.QConfig(  # type: ignore[assignment]
            activation=torch.ao.quantization.default_observer, weight=torch.ao.quantization.default_weight_observer
        )

    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    model.fuse_model()  # type: ignore[operator]
    torch.ao.quantization.prepare(model, inplace=True)
    # Calibration pass: record activation ranges before conversion.
    model(_dummy_input_data)
    torch.ao.quantization.convert(model, inplace=True)
43
+
44
+
45
+ def _fuse_modules(
46
+ model: nn.Module, modules_to_fuse: Union[List[str], List[List[str]]], is_qat: Optional[bool], **kwargs: Any
47
+ ):
48
+ if is_qat is None:
49
+ is_qat = model.training
50
+ method = torch.ao.quantization.fuse_modules_qat if is_qat else torch.ao.quantization.fuse_modules
51
+ return method(model, modules_to_fuse, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/regnet.py ADDED
@@ -0,0 +1,1571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from collections import OrderedDict
3
+ from functools import partial
4
+ from typing import Any, Callable, Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ from torch import nn, Tensor
8
+
9
+ from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
10
+ from ..transforms._presets import ImageClassification, InterpolationMode
11
+ from ..utils import _log_api_usage_once
12
+ from ._api import register_model, Weights, WeightsEnum
13
+ from ._meta import _IMAGENET_CATEGORIES
14
+ from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
15
+
16
+
17
+ __all__ = [
18
+ "RegNet",
19
+ "RegNet_Y_400MF_Weights",
20
+ "RegNet_Y_800MF_Weights",
21
+ "RegNet_Y_1_6GF_Weights",
22
+ "RegNet_Y_3_2GF_Weights",
23
+ "RegNet_Y_8GF_Weights",
24
+ "RegNet_Y_16GF_Weights",
25
+ "RegNet_Y_32GF_Weights",
26
+ "RegNet_Y_128GF_Weights",
27
+ "RegNet_X_400MF_Weights",
28
+ "RegNet_X_800MF_Weights",
29
+ "RegNet_X_1_6GF_Weights",
30
+ "RegNet_X_3_2GF_Weights",
31
+ "RegNet_X_8GF_Weights",
32
+ "RegNet_X_16GF_Weights",
33
+ "RegNet_X_32GF_Weights",
34
+ "regnet_y_400mf",
35
+ "regnet_y_800mf",
36
+ "regnet_y_1_6gf",
37
+ "regnet_y_3_2gf",
38
+ "regnet_y_8gf",
39
+ "regnet_y_16gf",
40
+ "regnet_y_32gf",
41
+ "regnet_y_128gf",
42
+ "regnet_x_400mf",
43
+ "regnet_x_800mf",
44
+ "regnet_x_1_6gf",
45
+ "regnet_x_3_2gf",
46
+ "regnet_x_8gf",
47
+ "regnet_x_16gf",
48
+ "regnet_x_32gf",
49
+ ]
50
+
51
+
52
class SimpleStemIN(Conv2dNormActivation):
    """Simple stem for ImageNet: 3x3, BN, ReLU."""

    def __init__(
        self,
        width_in: int,
        width_out: int,
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
    ) -> None:
        # A single stride-2 3x3 conv halves the spatial resolution immediately.
        super().__init__(
            width_in, width_out, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=activation_layer
        )
65
+
66
+
67
class BottleneckTransform(nn.Sequential):
    """Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""

    def __init__(
        self,
        width_in: int,
        width_out: int,
        stride: int,
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        group_width: int,
        bottleneck_multiplier: float,
        se_ratio: Optional[float],
    ) -> None:
        # Inner (bottleneck) width and the resulting grouped-conv group count.
        bottleneck_width = int(round(width_out * bottleneck_multiplier))
        num_groups = bottleneck_width // group_width

        modules: OrderedDict[str, nn.Module] = OrderedDict()
        modules["a"] = Conv2dNormActivation(
            width_in,
            bottleneck_width,
            kernel_size=1,
            stride=1,
            norm_layer=norm_layer,
            activation_layer=activation_layer,
        )
        modules["b"] = Conv2dNormActivation(
            bottleneck_width,
            bottleneck_width,
            kernel_size=3,
            stride=stride,
            groups=num_groups,
            norm_layer=norm_layer,
            activation_layer=activation_layer,
        )
        if se_ratio:
            # The SE reduction ratio is defined with respect to the
            # beginning of the block (width_in), not the bottleneck width.
            modules["se"] = SqueezeExcitation(
                input_channels=bottleneck_width,
                squeeze_channels=int(round(se_ratio * width_in)),
                activation=activation_layer,
            )
        # Final 1x1 projection back to the block's output width, no activation.
        modules["c"] = Conv2dNormActivation(
            bottleneck_width, width_out, kernel_size=1, stride=1, norm_layer=norm_layer, activation_layer=None
        )
        super().__init__(modules)
106
+
107
+
108
class ResBottleneckBlock(nn.Module):
    """Residual bottleneck block: x + F(x), F = bottleneck transform."""

    def __init__(
        self,
        width_in: int,
        width_out: int,
        stride: int,
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        group_width: int = 1,
        bottleneck_multiplier: float = 1.0,
        se_ratio: Optional[float] = None,
    ) -> None:
        super().__init__()

        # A 1x1 projection is needed on the skip path whenever the block
        # changes either the channel count or the spatial resolution.
        if width_in != width_out or stride != 1:
            self.proj = Conv2dNormActivation(
                width_in, width_out, kernel_size=1, stride=stride, norm_layer=norm_layer, activation_layer=None
            )
        else:
            self.proj = None
        self.f = BottleneckTransform(
            width_in,
            width_out,
            stride,
            norm_layer,
            activation_layer,
            group_width,
            bottleneck_multiplier,
            se_ratio,
        )
        self.activation = activation_layer(inplace=True)

    def forward(self, x: Tensor) -> Tensor:
        # Identity skip when shapes match, projected skip otherwise.
        shortcut = x if self.proj is None else self.proj(x)
        return self.activation(shortcut + self.f(x))
149
+
150
+
151
class AnyStage(nn.Sequential):
    """AnyNet stage (sequence of blocks w/ the same output shape)."""

    def __init__(
        self,
        width_in: int,
        width_out: int,
        stride: int,
        depth: int,
        block_constructor: Callable[..., nn.Module],
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        group_width: int,
        bottleneck_multiplier: float,
        se_ratio: Optional[float] = None,
        stage_index: int = 0,
    ) -> None:
        super().__init__()

        # Only the first block of a stage changes width/resolution; the
        # remaining blocks run at the stage output width with stride 1.
        for block_idx in range(depth):
            is_first = block_idx == 0
            self.add_module(
                f"block{stage_index}-{block_idx}",
                block_constructor(
                    width_in if is_first else width_out,
                    width_out,
                    stride if is_first else 1,
                    norm_layer,
                    activation_layer,
                    group_width,
                    bottleneck_multiplier,
                    se_ratio,
                ),
            )
183
+
184
+
185
class BlockParams:
    """Per-stage RegNet block settings (depths, widths, groups, strides)."""

    def __init__(
        self,
        depths: List[int],
        widths: List[int],
        group_widths: List[int],
        bottleneck_multipliers: List[float],
        strides: List[int],
        se_ratio: Optional[float] = None,
    ) -> None:
        # One entry per stage for each of the list parameters.
        self.depths = depths
        self.widths = widths
        self.group_widths = group_widths
        self.bottleneck_multipliers = bottleneck_multipliers
        self.strides = strides
        self.se_ratio = se_ratio

    @classmethod
    def from_init_params(
        cls,
        depth: int,
        w_0: int,
        w_a: float,
        w_m: float,
        group_width: int,
        bottleneck_multiplier: float = 1.0,
        se_ratio: Optional[float] = None,
        **kwargs: Any,
    ) -> "BlockParams":
        """
        Programmatically compute all the per-block settings,
        given the RegNet parameters.

        The first step is to compute the quantized linear block parameters,
        in log space. Key parameters are:
        - `w_a` is the width progression slope
        - `w_0` is the initial width
        - `w_m` is the width stepping in the log space

        In other terms
        `log(block_width) = log(w_0) + w_m * block_capacity`,
        with `bock_capacity` ramping up following the w_0 and w_a params.
        This block width is finally quantized to multiples of 8.

        The second step is to compute the parameters per stage,
        taking into account the skip connection and the final 1x1 convolutions.
        We use the fact that the output width is constant within a stage.
        """

        QUANT = 8
        STRIDE = 2

        if w_a < 0 or w_0 <= 0 or w_m <= 1 or w_0 % 8 != 0:
            raise ValueError("Invalid RegNet settings")
        # Compute the block widths. Each stage has one unique block width
        widths_cont = torch.arange(depth) * w_a + w_0
        block_capacity = torch.round(torch.log(widths_cont / w_0) / math.log(w_m))
        block_widths = (torch.round(torch.divide(w_0 * torch.pow(w_m, block_capacity), QUANT)) * QUANT).int().tolist()
        num_stages = len(set(block_widths))

        # Convert to per stage parameters
        # Zip each block width against its neighbor (padded with 0 at both
        # ends) so that `splits[i]` is True exactly where a new stage begins.
        split_helper = zip(
            block_widths + [0],
            [0] + block_widths,
            block_widths + [0],
            [0] + block_widths,
        )
        splits = [w != wp or r != rp for w, wp, r, rp in split_helper]

        # Stage widths are the block widths at the stage boundaries; stage
        # depths are the distances between consecutive boundaries.
        stage_widths = [w for w, t in zip(block_widths, splits[:-1]) if t]
        stage_depths = torch.diff(torch.tensor([d for d, t in enumerate(splits) if t])).int().tolist()

        strides = [STRIDE] * num_stages
        bottleneck_multipliers = [bottleneck_multiplier] * num_stages
        group_widths = [group_width] * num_stages

        # Adjust the compatibility of stage widths and group widths
        stage_widths, group_widths = cls._adjust_widths_groups_compatibilty(
            stage_widths, bottleneck_multipliers, group_widths
        )

        return cls(
            depths=stage_depths,
            widths=stage_widths,
            group_widths=group_widths,
            bottleneck_multipliers=bottleneck_multipliers,
            strides=strides,
            se_ratio=se_ratio,
        )

    def _get_expanded_params(self):
        # Iterate per-stage tuples: (width, stride, depth, group_width, bottleneck_multiplier).
        return zip(self.widths, self.strides, self.depths, self.group_widths, self.bottleneck_multipliers)

    @staticmethod
    def _adjust_widths_groups_compatibilty(
        stage_widths: List[int], bottleneck_ratios: List[float], group_widths: List[int]
    ) -> Tuple[List[int], List[int]]:
        """
        Adjusts the compatibility of widths and groups,
        depending on the bottleneck ratio.
        """
        # Compute all widths for the current settings
        widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
        # A group cannot be wider than the bottleneck it divides.
        group_widths_min = [min(g, w_bot) for g, w_bot in zip(group_widths, widths)]

        # Compute the adjusted widths so that stage and group widths fit
        ws_bot = [_make_divisible(w_bot, g) for w_bot, g in zip(widths, group_widths_min)]
        stage_widths = [int(w_bot / b) for w_bot, b in zip(ws_bot, bottleneck_ratios)]
        return stage_widths, group_widths_min
294
+
295
+
296
class RegNet(nn.Module):
    """RegNet network: a simple stem, a trunk of ``AnyStage`` stages, global average pooling and a linear classifier head."""

    def __init__(
        self,
        block_params: BlockParams,
        num_classes: int = 1000,
        stem_width: int = 32,
        stem_type: Optional[Callable[..., nn.Module]] = None,
        block_type: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        # Fall back to the standard RegNet building blocks when none are given.
        if stem_type is None:
            stem_type = SimpleStemIN
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if block_type is None:
            block_type = ResBottleneckBlock
        if activation is None:
            activation = nn.ReLU

        # Ad hoc stem: 3 input channels (RGB) -> stem_width channels.
        self.stem = stem_type(
            3,  # width_in
            stem_width,
            norm_layer,
            activation,
        )

        in_width = stem_width
        stages = []
        for idx, (
            out_width,
            stride,
            depth,
            group_width,
            bottleneck_multiplier,
        ) in enumerate(block_params._get_expanded_params()):
            stage = AnyStage(
                in_width,
                out_width,
                stride,
                depth,
                block_type,
                norm_layer,
                activation,
                group_width,
                bottleneck_multiplier,
                block_params.se_ratio,
                stage_index=idx + 1,
            )
            stages.append((f"block{idx + 1}", stage))
            in_width = out_width

        self.trunk_output = nn.Sequential(OrderedDict(stages))

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_features=in_width, out_features=num_classes)

        # ResNet-style weight initialization.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                # Note that there is no bias due to BN following every conv.
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                nn.init.normal_(module.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, mean=0.0, std=0.01)
                nn.init.zeros_(module.bias)

    def forward(self, x: Tensor) -> Tensor:
        """Run a batch through stem, trunk, global pooling and the classifier head."""
        x = self.stem(x)
        x = self.trunk_output(x)
        x = self.avgpool(x)
        x = x.flatten(start_dim=1)
        x = self.fc(x)
        return x
385
+
386
+
387
def _regnet(
    block_params: BlockParams,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> RegNet:
    """Instantiate a :class:`RegNet` and, when ``weights`` is given, load its pretrained state dict.

    Args:
        block_params: per-stage configuration of the network.
        weights: optional pretrained weights entry; when set, ``num_classes`` is
            forced to match the checkpoint's category count.
        progress: whether to show a download progress bar.
        **kwargs: forwarded to the ``RegNet`` constructor (``norm_layer`` is
            popped out and defaulted here).
    """
    pretrained = weights is not None
    if pretrained:
        # The classifier head must match the checkpoint's number of classes.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    default_norm = partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1)
    model = RegNet(block_params, norm_layer=kwargs.pop("norm_layer", default_norm), **kwargs)

    if pretrained:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
403
+
404
+
405
# Metadata shared by every weight entry defined below.
_COMMON_META: Dict[str, Any] = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}

# Extra metadata for the weights ported from Meta's SWAG project.
_COMMON_SWAG_META = {
    **_COMMON_META,
    "recipe": "https://github.com/facebookresearch/SWAG",
    "license": "https://github.com/facebookresearch/SWAG/blob/main/LICENSE",
}
415
+
416
+
417
class RegNet_Y_400MF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_400MF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_400mf-c65dace8.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 4344144,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.046,
                    "acc@5": 91.716,
                }
            },
            "_ops": 0.402,
            "_file_size": 16.806,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 4344144,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.804,
                    "acc@5": 92.742,
                }
            },
            "_ops": 0.402,
            "_file_size": 16.806,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
459
+
460
+
461
class RegNet_Y_800MF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_800MF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_800mf-1b27b58c.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 6432512,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.420,
                    "acc@5": 93.136,
                }
            },
            "_ops": 0.834,
            "_file_size": 24.774,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 6432512,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.828,
                    "acc@5": 94.502,
                }
            },
            "_ops": 0.834,
            "_file_size": 24.774,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
503
+
504
+
505
class RegNet_Y_1_6GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_1.6GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_1_6gf-b11a554e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11202430,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.950,
                    "acc@5": 93.966,
                }
            },
            "_ops": 1.612,
            "_file_size": 43.152,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 11202430,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.876,
                    "acc@5": 95.444,
                }
            },
            "_ops": 1.612,
            "_file_size": 43.152,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
547
+
548
+
549
class RegNet_Y_3_2GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_3.2GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_3_2gf-b5a9779c.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 19436338,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.948,
                    "acc@5": 94.576,
                }
            },
            "_ops": 3.176,
            "_file_size": 74.567,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 19436338,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.982,
                    "acc@5": 95.972,
                }
            },
            "_ops": 3.176,
            "_file_size": 74.567,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
591
+
592
+
593
class RegNet_Y_8GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_8GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_8gf-d0d0e4a8.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 39381472,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.032,
                    "acc@5": 95.048,
                }
            },
            "_ops": 8.473,
            "_file_size": 150.701,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 39381472,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.828,
                    "acc@5": 96.330,
                }
            },
            "_ops": 8.473,
            "_file_size": 150.701,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
635
+
636
+
637
class RegNet_Y_16GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_16GF model, including SWAG fine-tuned/linear variants."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_16gf-9e6ed7dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 83590140,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.424,
                    "acc@5": 95.240,
                }
            },
            "_ops": 15.912,
            "_file_size": 319.49,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83590140,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.886,
                    "acc@5": 96.328,
                }
            },
            "_ops": 15.912,
            "_file_size": 319.49,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # End-to-end fine-tuned SWAG weights; note the larger 384px inference resolution.
    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth",
        transforms=partial(
            ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 83590140,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 86.012,
                    "acc@5": 98.054,
                }
            },
            "_ops": 46.735,
            "_file_size": 319.49,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    # Frozen SWAG trunk plus an ImageNet-1K-trained linear classifier.
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 83590140,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.976,
                    "acc@5": 97.244,
                }
            },
            "_ops": 15.912,
            "_file_size": 319.49,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    # Default remains the non-SWAG recipe (SWAG variants change the inference transforms).
    DEFAULT = IMAGENET1K_V2
724
+
725
+
726
class RegNet_Y_32GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_32GF model, including SWAG fine-tuned/linear variants."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_32gf-4dee3f7a.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 145046770,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.878,
                    "acc@5": 95.340,
                }
            },
            "_ops": 32.28,
            "_file_size": 554.076,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 145046770,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.368,
                    "acc@5": 96.498,
                }
            },
            "_ops": 32.28,
            "_file_size": 554.076,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # End-to-end fine-tuned SWAG weights; note the larger 384px inference resolution.
    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth",
        transforms=partial(
            ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 145046770,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 86.838,
                    "acc@5": 98.362,
                }
            },
            "_ops": 94.826,
            "_file_size": 554.076,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    # Frozen SWAG trunk plus an ImageNet-1K-trained linear classifier.
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 145046770,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.622,
                    "acc@5": 97.480,
                }
            },
            "_ops": 32.28,
            "_file_size": 554.076,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    # Default remains the non-SWAG recipe (SWAG variants change the inference transforms).
    DEFAULT = IMAGENET1K_V2
813
+
814
+
815
class RegNet_Y_128GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_Y_128GF model (SWAG-derived variants only)."""

    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth",
        transforms=partial(
            ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 644812894,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 88.228,
                    "acc@5": 98.682,
                }
            },
            "_ops": 374.57,
            "_file_size": 2461.564,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    # Frozen SWAG trunk plus an ImageNet-1K-trained linear classifier.
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 644812894,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 86.068,
                    "acc@5": 97.844,
                }
            },
            "_ops": 127.518,
            "_file_size": 2461.564,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    # No non-SWAG recipe exists for this size; the E2E variant is the default.
    DEFAULT = IMAGENET1K_SWAG_E2E_V1
862
+
863
+
864
class RegNet_X_400MF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_400MF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_400mf-adf1edd5.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 5495976,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.834,
                    "acc@5": 90.950,
                }
            },
            "_ops": 0.414,
            "_file_size": 21.258,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 5495976,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.864,
                    "acc@5": 92.322,
                }
            },
            "_ops": 0.414,
            "_file_size": 21.257,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
906
+
907
+
908
class RegNet_X_800MF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_800MF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_800mf-ad17e45c.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 7259656,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.212,
                    "acc@5": 92.348,
                }
            },
            "_ops": 0.8,
            "_file_size": 27.945,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 7259656,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.522,
                    "acc@5": 93.826,
                }
            },
            "_ops": 0.8,
            "_file_size": 27.945,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
950
+
951
+
952
class RegNet_X_1_6GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_1.6GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_1_6gf-e3633e7f.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 9190136,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.040,
                    "acc@5": 93.440,
                }
            },
            "_ops": 1.603,
            "_file_size": 35.339,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 9190136,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.668,
                    "acc@5": 94.922,
                }
            },
            "_ops": 1.603,
            "_file_size": 35.339,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
994
+
995
+
996
class RegNet_X_3_2GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_3.2GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_3_2gf-f342aeae.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 15296552,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.364,
                    "acc@5": 93.992,
                }
            },
            "_ops": 3.177,
            "_file_size": 58.756,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 15296552,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.196,
                    "acc@5": 95.430,
                }
            },
            "_ops": 3.177,
            "_file_size": 58.756,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
1038
+
1039
+
1040
class RegNet_X_8GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_8GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_8gf-03ceed89.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 39572648,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.344,
                    "acc@5": 94.686,
                }
            },
            "_ops": 7.995,
            "_file_size": 151.456,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 39572648,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.682,
                    "acc@5": 95.678,
                }
            },
            "_ops": 7.995,
            "_file_size": 151.456,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
1082
+
1083
+
1084
class RegNet_X_16GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_16GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_16gf-2007eb11.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 54278536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.058,
                    "acc@5": 94.944,
                }
            },
            "_ops": 15.941,
            "_file_size": 207.627,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 54278536,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.716,
                    "acc@5": 96.196,
                }
            },
            "_ops": 15.941,
            "_file_size": 207.627,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
1126
+
1127
+
1128
class RegNet_X_32GF_Weights(WeightsEnum):
    """Pretrained ImageNet-1K weights for the RegNet_X_32GF model."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/regnet_x_32gf-9d47f8d0.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 107811560,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.622,
                    "acc@5": 95.248,
                }
            },
            "_ops": 31.736,
            "_file_size": 412.039,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 107811560,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.014,
                    "acc@5": 96.288,
                }
            },
            "_ops": 31.736,
            "_file_size": 412.039,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    # Best available weights for this architecture.
    DEFAULT = IMAGENET1K_V2
1170
+
1171
+
1172
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_400MF_Weights.IMAGENET1K_V1))
def regnet_y_400mf(*, weights: Optional[RegNet_Y_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """
    Construct a RegNetY_400MF architecture from
    `Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_400MF_Weights`, optional): The pretrained weights to use.
            See :class:`~torchvision.models.RegNet_Y_400MF_Weights` below for more details and possible values.
            By default, no pretrained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for more detail about the classes.

    .. autoclass:: torchvision.models.RegNet_Y_400MF_Weights
        :members:
    """
    weights = RegNet_Y_400MF_Weights.verify(weights)

    block_params = BlockParams.from_init_params(
        depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, weights, progress, **kwargs)
1196
+
1197
+
1198
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_800MF_Weights.IMAGENET1K_V1))
def regnet_y_800mf(*, weights: Optional[RegNet_Y_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """
    Construct a RegNetY_800MF architecture from
    `Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_800MF_Weights`, optional): The pretrained weights to use.
            See :class:`~torchvision.models.RegNet_Y_800MF_Weights` below for more details and possible values.
            By default, no pretrained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for more detail about the classes.

    .. autoclass:: torchvision.models.RegNet_Y_800MF_Weights
        :members:
    """
    weights = RegNet_Y_800MF_Weights.verify(weights)

    block_params = BlockParams.from_init_params(
        depth=14, w_0=56, w_a=38.84, w_m=2.4, group_width=16, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, weights, progress, **kwargs)
1222
+
1223
+
1224
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_1_6GF_Weights.IMAGENET1K_V1))
def regnet_y_1_6gf(*, weights: Optional[RegNet_Y_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_1.6GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_1_6GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_1_6GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_1_6GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 1.6GF (Y, with SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=27, w_0=48, w_a=20.71, w_m=2.65, group_width=24, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1250
+
1251
+
1252
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_3_2GF_Weights.IMAGENET1K_V1))
def regnet_y_3_2gf(*, weights: Optional[RegNet_Y_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_3.2GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_3_2GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_3_2GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_3_2GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 3.2GF (Y, with SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=21, w_0=80, w_a=42.63, w_m=2.66, group_width=24, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1278
+
1279
+
1280
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_8GF_Weights.IMAGENET1K_V1))
def regnet_y_8gf(*, weights: Optional[RegNet_Y_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_8GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_8GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_8GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_8GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 8GF (Y, with SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=17, w_0=192, w_a=76.82, w_m=2.19, group_width=56, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1306
+
1307
+
1308
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_16GF_Weights.IMAGENET1K_V1))
def regnet_y_16gf(*, weights: Optional[RegNet_Y_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_16GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_16GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_16GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_16GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 16GF (Y, with SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=18, w_0=200, w_a=106.23, w_m=2.48, group_width=112, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1334
+
1335
+
1336
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_32GF_Weights.IMAGENET1K_V1))
def regnet_y_32gf(*, weights: Optional[RegNet_Y_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_32GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_32GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_32GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_32GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 32GF (Y, with SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=20, w_0=232, w_a=115.89, w_m=2.53, group_width=232, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1362
+
1363
+
1364
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def regnet_y_128gf(*, weights: Optional[RegNet_Y_128GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetY_128GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_Y_128GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_Y_128GF_Weights
        :members:
    """
    checked_weights = RegNet_Y_128GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 128GF (Y, with SE blocks) regime from the paper.
    # NOTE: no legacy ``pretrained=True`` mapping exists for this variant (see decorator).
    block_params = BlockParams.from_init_params(
        depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, se_ratio=0.25, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1390
+
1391
+
1392
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_400MF_Weights.IMAGENET1K_V1))
def regnet_x_400mf(*, weights: Optional[RegNet_X_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_400MF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_400MF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_400MF_Weights
        :members:
    """
    checked_weights = RegNet_X_400MF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 400MF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=22, w_0=24, w_a=24.48, w_m=2.54, group_width=16, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1416
+
1417
+
1418
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_800MF_Weights.IMAGENET1K_V1))
def regnet_x_800mf(*, weights: Optional[RegNet_X_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_800MF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_800MF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_800MF_Weights
        :members:
    """
    checked_weights = RegNet_X_800MF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 800MF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=16, w_0=56, w_a=35.73, w_m=2.28, group_width=16, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1442
+
1443
+
1444
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_1_6GF_Weights.IMAGENET1K_V1))
def regnet_x_1_6gf(*, weights: Optional[RegNet_X_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_1.6GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_1_6GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_1_6GF_Weights
        :members:
    """
    checked_weights = RegNet_X_1_6GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 1.6GF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=18, w_0=80, w_a=34.01, w_m=2.25, group_width=24, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1468
+
1469
+
1470
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_3_2GF_Weights.IMAGENET1K_V1))
def regnet_x_3_2gf(*, weights: Optional[RegNet_X_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_3.2GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_3_2GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_3_2GF_Weights
        :members:
    """
    checked_weights = RegNet_X_3_2GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 3.2GF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=25, w_0=88, w_a=26.31, w_m=2.25, group_width=48, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1494
+
1495
+
1496
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_8GF_Weights.IMAGENET1K_V1))
def regnet_x_8gf(*, weights: Optional[RegNet_X_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_8GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_8GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_8GF_Weights
        :members:
    """
    checked_weights = RegNet_X_8GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 8GF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=23, w_0=80, w_a=49.56, w_m=2.88, group_width=120, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1520
+
1521
+
1522
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_16GF_Weights.IMAGENET1K_V1))
def regnet_x_16gf(*, weights: Optional[RegNet_X_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_16GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_16GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_16GF_Weights
        :members:
    """
    checked_weights = RegNet_X_16GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 16GF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=22, w_0=216, w_a=55.59, w_m=2.1, group_width=128, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
1546
+
1547
+
1548
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_32GF_Weights.IMAGENET1K_V1))
def regnet_x_32gf(*, weights: Optional[RegNet_X_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
    """RegNetX_32GF from `Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_.

    Args:
        weights (:class:`~torchvision.models.RegNet_X_32GF_Weights`, optional): Pretrained
            weights to load. ``None`` (the default) gives a randomly initialized model.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: Forwarded to both ``torchvision.models.regnet.RegNet`` and
            ``torchvision.models.regnet.BlockParams``; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
            for details about those classes.

    .. autoclass:: torchvision.models.RegNet_X_32GF_Weights
        :members:
    """
    checked_weights = RegNet_X_32GF_Weights.verify(weights)
    # Width/depth hyper-parameters for the 32GF (X, no SE blocks) regime from the paper.
    block_params = BlockParams.from_init_params(
        depth=23, w_0=320, w_a=69.86, w_m=2.0, group_width=168, **kwargs
    )
    return _regnet(block_params, checked_weights, progress, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/resnet.py ADDED
@@ -0,0 +1,985 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Callable, List, Optional, Type, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import Tensor
7
+
8
+ from ..transforms._presets import ImageClassification
9
+ from ..utils import _log_api_usage_once
10
+ from ._api import register_model, Weights, WeightsEnum
11
+ from ._meta import _IMAGENET_CATEGORIES
12
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
13
+
14
+
15
+ __all__ = [
16
+ "ResNet",
17
+ "ResNet18_Weights",
18
+ "ResNet34_Weights",
19
+ "ResNet50_Weights",
20
+ "ResNet101_Weights",
21
+ "ResNet152_Weights",
22
+ "ResNeXt50_32X4D_Weights",
23
+ "ResNeXt101_32X8D_Weights",
24
+ "ResNeXt101_64X4D_Weights",
25
+ "Wide_ResNet50_2_Weights",
26
+ "Wide_ResNet101_2_Weights",
27
+ "resnet18",
28
+ "resnet34",
29
+ "resnet50",
30
+ "resnet101",
31
+ "resnet152",
32
+ "resnext50_32x4d",
33
+ "resnext101_32x8d",
34
+ "resnext101_64x4d",
35
+ "wide_resnet50_2",
36
+ "wide_resnet101_2",
37
+ ]
38
+
39
+
40
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Return a bias-free 3x3 convolution.

    Padding is set equal to the dilation so the spatial size is preserved
    whenever ``stride == 1``. The following batch-norm layer supplies the bias.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        groups=groups,
        dilation=dilation,
        padding=dilation,
        bias=False,
    )
52
+
53
+
54
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Return a bias-free 1x1 (pointwise) convolution; used for channel projection."""
    return nn.Conv2d(in_planes, out_planes, stride=stride, kernel_size=1, bias=False)
57
+
58
+
59
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions plus a skip connection.

    This is the building block of the shallower ResNets (18/34). When
    ``downsample`` is given it is applied to the identity branch so its shape
    matches the main branch before the addition.
    """

    # Output channels are ``planes * expansion``; the basic block does not expand.
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Grouped/widened variants (ResNeXt, Wide-ResNet) only exist for Bottleneck.
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # The first conv carries the spatial stride; the downsample branch (if any)
        # downsamples the identity correspondingly.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-relu, conv-bn, add identity, final relu."""
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        shortcut = x if self.downsample is None else self.downsample(x)
        y += shortcut
        return self.relu(y)
106
+
107
+
108
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand, plus skip.

    torchvision places the stride on the 3x3 convolution (self.conv2) rather
    than on the first 1x1 as in the original paper (https://arxiv.org/abs/1512.03385).
    This "ResNet V1.5" variant improves accuracy; see
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    # The final 1x1 conv expands the channels by this factor.
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width (Wide-ResNet) and groups (ResNeXt).
        width = int(planes * (base_width / 64.0)) * groups
        # self.conv2 (and the downsample branch) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """1x1-bn-relu, 3x3-bn-relu, 1x1-bn, add identity, final relu."""
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        shortcut = x if self.downsample is None else self.downsample(x)
        y += shortcut
        return self.relu(y)
164
+
165
+
166
class ResNet(nn.Module):
    """Generic ResNet backbone (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`_).

    Args:
        block: Residual block class, ``BasicBlock`` (ResNet-18/34) or
            ``Bottleneck`` (ResNet-50+).
        layers: Number of blocks in each of the four stages.
        num_classes: Size of the final fully-connected classifier.
        zero_init_residual: If True, zero-init the last BN of each block.
        groups: Convolution groups for the 3x3 convs (ResNeXt variants).
        width_per_group: Base width of the bottleneck (Wide-ResNet variants).
        replace_stride_with_dilation: 3 flags; replace the stride of stages
            2-4 with dilation (used by segmentation/detection backbones).
        norm_layer: Normalization layer factory; defaults to ``nn.BatchNorm2d``.
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        # Running state consumed and mutated by _make_layer below.
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall 4x downsampling).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the resolution (or dilate instead).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        # Classifier head: global average pool + linear layer.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck) and m.bn3.weight is not None:
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock) and m.bn2.weight is not None:
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Build one residual stage of ``blocks`` blocks.

        NOTE: order-sensitive — reads and updates ``self.inplanes`` and
        ``self.dilation``, so stages must be constructed in sequence.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation; spatial resolution is preserved.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity branch when shape changes (stride or channels).
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        # Only the first block of a stage strides/downsamples.
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        """Map an image batch ``(N, 3, H, W)`` to class logits ``(N, num_classes)``."""
        return self._forward_impl(x)
286
+
287
+
288
def _resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Build a :class:`ResNet` and optionally load pretrained weights.

    When ``weights`` is given, ``num_classes`` is forced to the number of
    categories the checkpoint was trained on (raising if the caller passed a
    conflicting value), and the state dict is downloaded and loaded.
    """
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    net = ResNet(block, layers, **kwargs)

    if weights is None:
        return net

    net.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
    return net
304
+
305
+
306
# Metadata fields shared by every ResNet weight entry defined below.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}
310
+
311
+
312
class ResNet18_Weights(WeightsEnum):
    """Pretrained weights available for :func:`resnet18`."""

    # Original-paper recipe; metrics are ImageNet-1K single-crop accuracy.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet18-f37072fd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.758,
                    "acc@5": 89.078,
                }
            },
            "_ops": 1.814,
            "_file_size": 44.661,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
332
+
333
+
334
class ResNet34_Weights(WeightsEnum):
    """Pretrained weights available for :func:`resnet34`."""

    # Original-paper recipe; metrics are ImageNet-1K single-crop accuracy.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet34-b627a593.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 21797672,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.314,
                    "acc@5": 91.420,
                }
            },
            "_ops": 3.664,
            "_file_size": 83.275,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
354
+
355
+
356
class ResNet50_Weights(WeightsEnum):
    """Pretrained weights for ResNet-50; see :func:`resnet50` for usage."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet50-0676ba61.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.130,
                    "acc@5": 92.862,
                }
            },
            "_ops": 4.089,
            "_file_size": 97.781,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet50-11ad3fa6.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#issuecomment-1013906621",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.858,
                    "acc@5": 95.434,
                }
            },
            "_ops": 4.089,
            "_file_size": 97.79,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
397
+
398
+
399
class ResNet101_Weights(WeightsEnum):
    """Pretrained weights for ResNet-101; see :func:`resnet101` for usage."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet101-63fe2227.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 44549160,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.374,
                    "acc@5": 93.546,
                }
            },
            "_ops": 7.801,
            "_file_size": 170.511,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet101-cd907fc2.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 44549160,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.886,
                    "acc@5": 95.780,
                }
            },
            "_ops": 7.801,
            "_file_size": 170.53,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
440
+
441
+
442
class ResNet152_Weights(WeightsEnum):
    """Pretrained weights for ResNet-152; see :func:`resnet152` for usage."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnet152-394f9c45.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 60192808,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.312,
                    "acc@5": 94.046,
                }
            },
            "_ops": 11.514,
            "_file_size": 230.434,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnet152-f82ba261.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 60192808,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.284,
                    "acc@5": 96.002,
                }
            },
            "_ops": 11.514,
            "_file_size": 230.474,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
483
+
484
+
485
class ResNeXt50_32X4D_Weights(WeightsEnum):
    """Pretrained weights for ResNeXt-50 32x4d; see :func:`resnext50_32x4d`."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25028904,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.618,
                    "acc@5": 93.698,
                }
            },
            "_ops": 4.23,
            "_file_size": 95.789,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25028904,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.198,
                    "acc@5": 95.340,
                }
            },
            "_ops": 4.23,
            "_file_size": 95.833,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
526
+
527
+
528
class ResNeXt101_32X8D_Weights(WeightsEnum):
    """Pretrained weights for ResNeXt-101 32x8d; see :func:`resnext101_32x8d`."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#resnext",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.312,
                    "acc@5": 94.526,
                }
            },
            "_ops": 16.414,
            "_file_size": 339.586,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe (FixRes variant).
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.834,
                    "acc@5": 96.228,
                }
            },
            "_ops": 16.414,
            "_file_size": 339.673,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
569
+
570
+
571
class ResNeXt101_64X4D_Weights(WeightsEnum):
    """Pretrained weights for ResNeXt-101 64x4d; see :func:`resnext101_64x4d`."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.246,
                    "acc@5": 96.454,
                }
            },
            "_ops": 15.46,
            "_file_size": 319.318,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
594
+
595
+
596
class Wide_ResNet50_2_Weights(WeightsEnum):
    """Pretrained weights for Wide ResNet-50-2; see :func:`wide_resnet50_2`."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 68883240,
            "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.468,
                    "acc@5": 94.086,
                }
            },
            "_ops": 11.398,
            "_file_size": 131.82,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe (FixRes variant).
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 68883240,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 81.602,
                    "acc@5": 95.758,
                }
            },
            "_ops": 11.398,
            "_file_size": 263.124,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
637
+
638
+
639
class Wide_ResNet101_2_Weights(WeightsEnum):
    """Pretrained weights for Wide ResNet-101-2; see :func:`wide_resnet101_2`."""

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 126886696,
            "recipe": "https://github.com/pytorch/vision/pull/912#issue-445437439",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.848,
                    "acc@5": 94.284,
                }
            },
            "_ops": 22.753,
            "_file_size": 242.896,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # V2: same architecture, retrained with TorchVision's improved recipe.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 126886696,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.510,
                    "acc@5": 96.020,
                }
            },
            "_ops": 22.753,
            "_file_size": 484.747,
            "_docs": """
                These weights improve upon the results of the original paper by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
680
+
681
+
682
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet18_Weights.IMAGENET1K_V1))
def resnet18(*, weights: Optional[ResNet18_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18 model from `Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`__.

    Args:
        weights (:class:`~torchvision.models.ResNet18_Weights`, optional):
            Pretrained weights to load. See
            :class:`~torchvision.models.ResNet18_Weights` below for the
            available values. Defaults to ``None`` (no pretrained weights).
        progress (bool, optional): Whether to display a download progress bar
            on stderr. Defaults to ``True``.
        **kwargs: Extra parameters forwarded to the
            ``torchvision.models.resnet.ResNet`` base class; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for details.

    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
    """
    checked_weights = ResNet18_Weights.verify(weights)

    # ResNet-18: BasicBlock with stage depths [2, 2, 2, 2].
    return _resnet(BasicBlock, [2, 2, 2, 2], checked_weights, progress, **kwargs)
706
+
707
+
708
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet34_Weights.IMAGENET1K_V1))
def resnet34(*, weights: Optional[ResNet34_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34 model from `Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`__.

    Args:
        weights (:class:`~torchvision.models.ResNet34_Weights`, optional):
            Pretrained weights to load. See
            :class:`~torchvision.models.ResNet34_Weights` below for the
            available values. Defaults to ``None`` (no pretrained weights).
        progress (bool, optional): Whether to display a download progress bar
            on stderr. Defaults to ``True``.
        **kwargs: Extra parameters forwarded to the
            ``torchvision.models.resnet.ResNet`` base class; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for details.

    .. autoclass:: torchvision.models.ResNet34_Weights
        :members:
    """
    checked_weights = ResNet34_Weights.verify(weights)

    # ResNet-34: BasicBlock with stage depths [3, 4, 6, 3].
    return _resnet(BasicBlock, [3, 4, 6, 3], checked_weights, progress, **kwargs)
732
+
733
+
734
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet50_Weights.IMAGENET1K_V1))
def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 model from `Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`__.

    .. note::
        TorchVision's bottleneck puts the downsampling stride on the second
        3x3 convolution, whereas the original paper puts it on the first 1x1
        convolution. This variant improves the accuracy and is known as
        `ResNet V1.5
        <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet50_Weights`, optional):
            Pretrained weights to load. See
            :class:`~torchvision.models.ResNet50_Weights` below for the
            available values. Defaults to ``None`` (no pretrained weights).
        progress (bool, optional): Whether to display a download progress bar
            on stderr. Defaults to ``True``.
        **kwargs: Extra parameters forwarded to the
            ``torchvision.models.resnet.ResNet`` base class; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for details.

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
    """
    checked_weights = ResNet50_Weights.verify(weights)

    # ResNet-50: Bottleneck with stage depths [3, 4, 6, 3].
    return _resnet(Bottleneck, [3, 4, 6, 3], checked_weights, progress, **kwargs)
764
+
765
+
766
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet101_Weights.IMAGENET1K_V1))
def resnet101(*, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101 model from `Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`__.

    .. note::
        TorchVision's bottleneck puts the downsampling stride on the second
        3x3 convolution, whereas the original paper puts it on the first 1x1
        convolution. This variant improves the accuracy and is known as
        `ResNet V1.5
        <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet101_Weights`, optional):
            Pretrained weights to load. See
            :class:`~torchvision.models.ResNet101_Weights` below for the
            available values. Defaults to ``None`` (no pretrained weights).
        progress (bool, optional): Whether to display a download progress bar
            on stderr. Defaults to ``True``.
        **kwargs: Extra parameters forwarded to the
            ``torchvision.models.resnet.ResNet`` base class; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for details.

    .. autoclass:: torchvision.models.ResNet101_Weights
        :members:
    """
    checked_weights = ResNet101_Weights.verify(weights)

    # ResNet-101: Bottleneck with stage depths [3, 4, 23, 3].
    return _resnet(Bottleneck, [3, 4, 23, 3], checked_weights, progress, **kwargs)
796
+
797
+
798
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNet152_Weights.IMAGENET1K_V1))
def resnet152(*, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152 model from `Deep Residual Learning for Image Recognition
    <https://arxiv.org/abs/1512.03385>`__.

    .. note::
        TorchVision's bottleneck puts the downsampling stride on the second
        3x3 convolution, whereas the original paper puts it on the first 1x1
        convolution. This variant improves the accuracy and is known as
        `ResNet V1.5
        <https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.

    Args:
        weights (:class:`~torchvision.models.ResNet152_Weights`, optional):
            Pretrained weights to load. See
            :class:`~torchvision.models.ResNet152_Weights` below for the
            available values. Defaults to ``None`` (no pretrained weights).
        progress (bool, optional): Whether to display a download progress bar
            on stderr. Defaults to ``True``.
        **kwargs: Extra parameters forwarded to the
            ``torchvision.models.resnet.ResNet`` base class; see the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for details.

    .. autoclass:: torchvision.models.ResNet152_Weights
        :members:
    """
    checked_weights = ResNet152_Weights.verify(weights)

    # ResNet-152: Bottleneck with stage depths [3, 8, 36, 3].
    return _resnet(Bottleneck, [3, 8, 36, 3], checked_weights, progress, **kwargs)
828
+
829
+
830
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt50_32X4D_Weights.IMAGENET1K_V1))
def resnext50_32x4d(
    *, weights: Optional[ResNeXt50_32X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-50 32x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt50_32X4D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt50_32X4D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNeXt50_32X4D_Weights
        :members:
    """
    weights = ResNeXt50_32X4D_Weights.verify(weights)

    # ResNeXt-50 32x4d = ResNet-50 topology with 32 groups of width 4.
    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
858
+
859
+
860
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt101_32X8D_Weights.IMAGENET1K_V1))
def resnext101_32x8d(
    *, weights: Optional[ResNeXt101_32X8D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt101_32X8D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
    """
    weights = ResNeXt101_32X8D_Weights.verify(weights)

    # ResNeXt-101 32x8d = ResNet-101 topology with 32 groups of width 8.
    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 8)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
888
+
889
+
890
@register_model()
@handle_legacy_interface(weights=("pretrained", ResNeXt101_64X4D_Weights.IMAGENET1K_V1))
def resnext101_64x4d(
    *, weights: Optional[ResNeXt101_64X4D_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_.

    Args:
        weights (:class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ResNeXt101_64X4D_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
    """
    weights = ResNeXt101_64X4D_Weights.verify(weights)

    # ResNeXt-101 64x4d = ResNet-101 topology with 64 groups of width 4.
    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
918
+
919
+
920
@register_model()
@handle_legacy_interface(weights=("pretrained", Wide_ResNet50_2_Weights.IMAGENET1K_V1))
def wide_resnet50_2(
    *, weights: Optional[Wide_ResNet50_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """Wide ResNet-50-2 model from
    `Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        weights (:class:`~torchvision.models.Wide_ResNet50_2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.Wide_ResNet50_2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.Wide_ResNet50_2_Weights
        :members:
    """
    weights = Wide_ResNet50_2_Weights.verify(weights)

    # Doubling width_per_group (64 -> 128) widens every bottleneck 3x3 conv.
    _ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
    return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
952
+
953
+
954
@register_model()
@handle_legacy_interface(weights=("pretrained", Wide_ResNet101_2_Weights.IMAGENET1K_V1))
def wide_resnet101_2(
    *, weights: Optional[Wide_ResNet101_2_Weights] = None, progress: bool = True, **kwargs: Any
) -> ResNet:
    """Wide ResNet-101-2 model from
    `Wide Residual Networks <https://arxiv.org/abs/1605.07146>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-101 has 2048-512-2048
    channels, and in Wide ResNet-101-2 has 2048-1024-2048.

    Args:
        weights (:class:`~torchvision.models.Wide_ResNet101_2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.Wide_ResNet101_2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.Wide_ResNet101_2_Weights
        :members:
    """
    weights = Wide_ResNet101_2_Weights.verify(weights)

    # Doubling width_per_group (64 -> 128) widens every bottleneck 3x3 conv.
    _ovewrite_named_param(kwargs, "width_per_group", 64 * 2)
    return _resnet(Bottleneck, [3, 4, 23, 3], weights, progress, **kwargs)
pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .deeplabv3 import *
2
+ from .fcn import *
3
+ from .lraspp import *
pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (242 Bytes). View file
 
pllava/lib/python3.10/site-packages/torchvision/models/segmentation/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.44 kB). View file