Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- llava_next/share/terminfo/2/2621-wl +0 -0
- llava_next/share/terminfo/2/2621a +0 -0
- llava_next/share/terminfo/e/elks-glasstty +0 -0
- llava_next/share/terminfo/e/ep40 +0 -0
- llava_next/share/terminfo/e/ep4080 +0 -0
- llava_next/share/terminfo/e/esprit-am +0 -0
- llava_next/share/terminfo/e/eterm +0 -0
- llava_next/share/terminfo/e/ex155 +0 -0
- llava_next/share/terminfo/m/mach +0 -0
- llava_next/share/terminfo/m/mach-gnu +0 -0
- llava_next/share/terminfo/m/masscomp1 +0 -0
- llava_next/share/terminfo/m/mdl110 +0 -0
- llava_next/share/terminfo/m/megatek +0 -0
- llava_next/share/terminfo/m/memhp +0 -0
- llava_next/share/terminfo/m/mgt +0 -0
- llava_next/share/terminfo/m/mgterm +0 -0
- llava_next/share/terminfo/m/microbee +0 -0
- llava_next/share/terminfo/m/microterm5 +0 -0
- llava_next/share/terminfo/m/mime2a-s +0 -0
- llava_next/share/terminfo/m/mime3a +0 -0
- llava_next/share/terminfo/m/mime3ax +0 -0
- llava_next/share/terminfo/m/mimei +0 -0
- llava_next/share/terminfo/m/minitel1-nb +0 -0
- llava_next/share/terminfo/m/minitel12-80 +0 -0
- llava_next/share/terminfo/m/minitel1b-80 +0 -0
- llava_next/share/terminfo/m/minitel2-80 +0 -0
- llava_next/share/terminfo/m/minix-3.0 +0 -0
- llava_next/share/terminfo/m/minix-old +0 -0
- llava_next/share/terminfo/m/mlterm2 +0 -0
- llava_next/share/terminfo/m/mm314 +0 -0
- llava_next/share/terminfo/m/mm340 +0 -0
- llava_next/share/terminfo/m/mod +0 -0
- llava_next/share/terminfo/m/modgraph2 +0 -0
- llava_next/share/terminfo/m/mouse-sun +0 -0
- llava_next/share/terminfo/m/mt-70 +0 -0
- llava_next/share/terminfo/m/mvterm +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/__init__.py +20 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__init__.py +38 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/functional.py +645 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__init__.py +132 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/conv.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/linear.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/activation.py +303 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/conv.py +946 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/dropout.py +28 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/embedding_ops.py +295 -0
- parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/functional_modules.py +250 -0
llava_next/share/terminfo/2/2621-wl
ADDED
Binary file (622 Bytes)

llava_next/share/terminfo/2/2621a
ADDED
Binary file (622 Bytes)

llava_next/share/terminfo/e/elks-glasstty
ADDED
Binary file (385 Bytes)

llava_next/share/terminfo/e/ep40
ADDED
Binary file (370 Bytes)

llava_next/share/terminfo/e/ep4080
ADDED
Binary file (370 Bytes)

llava_next/share/terminfo/e/esprit-am
ADDED
Binary file (496 Bytes)

llava_next/share/terminfo/e/eterm
ADDED
Binary file (908 Bytes)

llava_next/share/terminfo/e/ex155
ADDED
Binary file (554 Bytes)

llava_next/share/terminfo/m/mach
ADDED
Binary file (635 Bytes)

llava_next/share/terminfo/m/mach-gnu
ADDED
Binary file (1.07 kB)

llava_next/share/terminfo/m/masscomp1
ADDED
Binary file (482 Bytes)

llava_next/share/terminfo/m/mdl110
ADDED
Binary file (476 Bytes)

llava_next/share/terminfo/m/megatek
ADDED
Binary file (80 Bytes)

llava_next/share/terminfo/m/memhp
ADDED
Binary file (1.08 kB)

llava_next/share/terminfo/m/mgt
ADDED
Binary file (1.98 kB)

llava_next/share/terminfo/m/mgterm
ADDED
Binary file (1.14 kB)

llava_next/share/terminfo/m/microbee
ADDED
Binary file (475 Bytes)

llava_next/share/terminfo/m/microterm5
ADDED
Binary file (498 Bytes)

llava_next/share/terminfo/m/mime2a-s
ADDED
Binary file (492 Bytes)

llava_next/share/terminfo/m/mime3a
ADDED
Binary file (945 Bytes)

llava_next/share/terminfo/m/mime3ax
ADDED
Binary file (989 Bytes)

llava_next/share/terminfo/m/mimei
ADDED
Binary file (493 Bytes)

llava_next/share/terminfo/m/minitel1-nb
ADDED
Binary file (1.63 kB)

llava_next/share/terminfo/m/minitel12-80
ADDED
Binary file (1.83 kB)

llava_next/share/terminfo/m/minitel1b-80
ADDED
Binary file (1.93 kB)

llava_next/share/terminfo/m/minitel2-80
ADDED
Binary file (1.93 kB)

llava_next/share/terminfo/m/minix-3.0
ADDED
Binary file (1.14 kB)

llava_next/share/terminfo/m/minix-old
ADDED
Binary file (607 Bytes)

llava_next/share/terminfo/m/mlterm2
ADDED
Binary file (2.51 kB)

llava_next/share/terminfo/m/mm314
ADDED
Binary file (360 Bytes)

llava_next/share/terminfo/m/mm340
ADDED
Binary file (422 Bytes)

llava_next/share/terminfo/m/mod
ADDED
Binary file (1.14 kB)

llava_next/share/terminfo/m/modgraph2
ADDED
Binary file (598 Bytes)

llava_next/share/terminfo/m/mouse-sun
ADDED
Binary file (393 Bytes)

llava_next/share/terminfo/m/mt-70
ADDED
Binary file (842 Bytes)

llava_next/share/terminfo/m/mvterm
ADDED
Binary file (1.42 kB)
parrot/lib/python3.10/site-packages/torch/ao/nn/__init__.py
ADDED
@@ -0,0 +1,20 @@
+# mypy: allow-untyped-defs
+# We are exposing all subpackages to the end-user.
+# Because of possible inter-dependency, we want to avoid
+# the cyclic imports, thus implementing lazy version
+# as per https://peps.python.org/pep-0562/
+
+import importlib
+
+__all__ = [
+    "intrinsic",
+    "qat",
+    "quantizable",
+    "quantized",
+    "sparse",
+]
+
+def __getattr__(name):
+    if name in __all__:
+        return importlib.import_module("." + name, __name__)
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
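The file above uses the PEP 562 module-level `__getattr__` so that `import torch.ao.nn` stays cheap: subpackages such as `quantized` are imported only the first time they are looked up as attributes. A minimal sketch of the same pattern, for a hypothetical package `mypkg` with one heavy submodule (the names are illustrative, not part of this diff):

# mypkg/__init__.py -- sketch of PEP 562 lazy submodule loading
import importlib

__all__ = ["heavy"]  # submodules resolved on first attribute access

def __getattr__(name):
    # Invoked only when normal attribute lookup fails, so `import mypkg`
    # does not import `mypkg.heavy`; accessing `mypkg.heavy` triggers it.
    if name in __all__:
        return importlib.import_module("." + name, __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

After the first access, the submodule is cached in `sys.modules` and bound as an attribute of the package, so `__getattr__` is not called again for it.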
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__init__.py
ADDED
@@ -0,0 +1,38 @@
+from . import functional
+from .modules import *  # noqa: F403
+from .modules import MaxPool2d
+
+__all__ = [
+    'BatchNorm2d',
+    'BatchNorm3d',
+    'Conv1d',
+    'Conv2d',
+    'Conv3d',
+    'ConvTranspose1d',
+    'ConvTranspose2d',
+    'ConvTranspose3d',
+    'DeQuantize',
+    'ELU',
+    'Embedding',
+    'EmbeddingBag',
+    'GroupNorm',
+    'Hardswish',
+    'InstanceNorm1d',
+    'InstanceNorm2d',
+    'InstanceNorm3d',
+    'LayerNorm',
+    'LeakyReLU',
+    'Linear',
+    'LSTM',
+    'MultiheadAttention',
+    'Quantize',
+    'ReLU6',
+    'Sigmoid',
+    'Softmax',
+    'Dropout',
+    'PReLU',
+    # Wrapper modules
+    'FloatFunctional',
+    'FXFloatFunctional',
+    'QFunctional',
+]
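This `__init__.py` only re-exports the public surface defined in `.modules`; `MaxPool2d` in particular is the ordinary float `torch.nn.MaxPool2d` (max pooling works directly on quantized tensors and propagates their quantization parameters), as the import in `modules/__init__.py` further below shows. A quick check, assuming a standard PyTorch install:

import torch.nn as nn
import torch.ao.nn.quantized as nnq

# The re-export is the same class object, not a quantized reimplementation.
assert nnq.MaxPool2d is nn.MaxPool2d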
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/functional.py
ADDED
@@ -0,0 +1,645 @@
+# mypy: allow-untyped-defs
+r""" Functional interface (quantized)."""
+from typing import List, Optional
+import warnings
+
+import torch
+from torch import Tensor
+from torch.nn.modules.utils import _pair, _triple
+from torch.jit.annotations import BroadcastingList2
+
+from .modules.utils import _pair_from_first
+
+# Although some of the functions and docstrings are mirrored from the torch.nn,
+# we want to have them here for future changes.
+
+__all__ = [
+    "avg_pool2d",
+    "avg_pool3d",
+    "adaptive_avg_pool2d",
+    "adaptive_avg_pool3d",
+    "conv1d",
+    "conv2d",
+    "conv3d",
+    "interpolate",
+    "linear",
+    "max_pool1d",
+    "max_pool2d",
+    "celu",
+    "leaky_relu",
+    "hardtanh",
+    "hardswish",
+    "threshold",
+    "elu",
+    "hardsigmoid",
+    "clamp",
+    "upsample",
+    "upsample_bilinear",
+    "upsample_nearest",
+]
+
+def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
+               count_include_pad=True, divisor_override=None):
+    r"""
+    Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
+    :math:`sH \times sW` steps. The number of output features is equal to the number of
+    input planes.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    See :class:`~torch.ao.nn.quantized.AvgPool2d` for details and output shape.
+
+    Args:
+        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
+        kernel_size: size of the pooling region. Can be a single number or a
+          tuple `(kH, kW)`
+        stride: stride of the pooling operation. Can be a single number or a
+          tuple `(sH, sW)`. Default: :attr:`kernel_size`
+        padding: implicit zero paddings on both sides of the input. Can be a
+          single number or a tuple `(padH, padW)`. Default: 0
+        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
+            to compute the output shape. Default: ``False``
+        count_include_pad: when True, will include the zero-padding in the
+            averaging calculation. Default: ``True``
+        divisor_override: if specified, it will be used as divisor, otherwise
+            size of the pooling region will be used. Default: None
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
+    return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
+                                          ceil_mode, count_include_pad,
+                                          divisor_override)
+
+def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
+               count_include_pad=True, divisor_override=None):
+    r"""
+    Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
+    :math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
+    input planes.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    Args:
+        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
+        kernel_size: size of the pooling region. Can be a single number or a
+          tuple `(kD, kH, kW)`
+        stride: stride of the pooling operation. Can be a single number or a
+          tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
+        padding: implicit zero paddings on both sides of the input. Can be a
+          single number or a tuple `(padD, padH, padW)`. Default: 0
+        ceil_mode: when True, will use `ceil` instead of `floor` in the formula
+            to compute the output shape. Default: ``False``
+        count_include_pad: when True, will include the zero-padding in the
+            averaging calculation. Default: ``True``
+        divisor_override: if specified, it will be used as divisor, otherwise
+            size of the pooling region will be used. Default: None
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
+    return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
+                                          ceil_mode, count_include_pad,
+                                          divisor_override)
+
+def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
+    r"""
+    Applies a 2D adaptive average pooling over a quantized input signal composed
+    of several quantized input planes.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool2d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+                     double-integer tuple)
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!")
+    return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
+
+def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
+    r"""
+    Applies a 3D adaptive average pooling over a quantized input signal composed
+    of several quantized input planes.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    See :class:`~torch.ao.nn.quantized.AdaptiveAvgPool3d` for details and output shape.
+
+    Args:
+        output_size: the target output size (single integer or
+                     double-integer tuple)
+    """
+    if not input.is_quantized:
+        raise ValueError(
+            "Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!")
+    return torch.nn.functional.adaptive_avg_pool3d(input, output_size)
+
+def conv1d(input, weight, bias,
+           stride=1, padding=0, dilation=1, groups=1,
+           padding_mode='zeros',
+           scale=1.0, zero_point=0,
+           dtype=torch.quint8):
+    r"""
+    Applies a 1D convolution over a quantized 1D input composed of several input
+    planes.
+
+    See :class:`~torch.ao.nn.quantized.Conv1d` for details and output shape.
+
+    Args:
+        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
+        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , iW)`
+        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
+        stride: the stride of the convolving kernel. Can be a single number or a
+          tuple `(sW,)`. Default: 1
+        padding: implicit paddings on both sides of the input. Can be a
+          single number or a tuple `(padW,)`. Default: 0
+        dilation: the spacing between kernel elements. Can be a single number or
+          a tuple `(dW,)`. Default: 1
+        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
+          number of groups. Default: 1
+        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
+        scale: quantization scale for the output. Default: 1.0
+        zero_point: quantization zero_point for the output. Default: 0
+        dtype: quantization data type to use. Default: ``torch.quint8``
+
+    Examples::
+
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
+        >>> from torch.ao.nn.quantized import functional as qF
+        >>> filters = torch.randn(33, 16, 3, dtype=torch.float)
+        >>> inputs = torch.randn(20, 16, 50, dtype=torch.float)
+        >>> bias = torch.randn(33, dtype=torch.float)
+        >>>
+        >>> scale, zero_point = 1.0, 0
+        >>> dtype_inputs = torch.quint8
+        >>> dtype_filters = torch.qint8
+        >>>
+        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
+        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
+        >>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
+    """  # noqa: E501
+    if padding_mode != 'zeros':
+        raise NotImplementedError("Only zero-padding is supported!")
+    if input.dtype != torch.quint8:
+        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
+    if weight.dtype != torch.qint8:
+        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
+    if input.ndim != 3:
+        raise ValueError("Input shape must be `(N, C, L)`!")
+    stride = _pair_from_first(stride)
+    padding = _pair_from_first(padding)
+    dilation = _pair_from_first(dilation)
+
+    packed_params = torch.ops.quantized.conv1d_prepack(
+        weight, bias, stride, padding, dilation, groups)
+    return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)
+
+def conv2d(input, weight, bias,
+           stride=1, padding=0, dilation=1, groups=1,
+           padding_mode='zeros',
+           scale=1.0, zero_point=0,
+           dtype=torch.quint8):
+    r"""
+    Applies a 2D convolution over a quantized 2D input composed of several input
+    planes.
+
+    See :class:`~torch.ao.nn.quantized.Conv2d` for details and output shape.
+
+    Args:
+        input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
+        weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
+        bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
+        stride: the stride of the convolving kernel. Can be a single number or a
+          tuple `(sH, sW)`. Default: 1
+        padding: implicit paddings on both sides of the input. Can be a
+          single number or a tuple `(padH, padW)`. Default: 0
+        dilation: the spacing between kernel elements. Can be a single number or
+          a tuple `(dH, dW)`. Default: 1
+        groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
+          number of groups. Default: 1
+        padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
+        scale: quantization scale for the output. Default: 1.0
+        zero_point: quantization zero_point for the output. Default: 0
+        dtype: quantization data type to use. Default: ``torch.quint8``
+
+    Examples::
+
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
+        >>> from torch.ao.nn.quantized import functional as qF
+        >>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
+        >>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
+        >>> bias = torch.randn(8, dtype=torch.float)
+        >>>
+        >>> scale, zero_point = 1.0, 0
+        >>> dtype_inputs = torch.quint8
+        >>> dtype_filters = torch.qint8
+        >>>
+        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
+        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
+        >>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
+    """  # noqa: E501
+    if padding_mode != 'zeros':
+        raise NotImplementedError("Only zero-padding is supported!")
+    if input.dtype != torch.quint8:
+        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
+    if weight.dtype != torch.qint8:
+        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
+    if input.ndim != 4:
+        raise ValueError("Input shape must be `(N, C, H, W)`!")
+    stride = _pair(stride)
+    padding = _pair(padding)
+    dilation = _pair(dilation)
+
+    packed_params = torch.ops.quantized.conv2d_prepack(
+        weight, bias, stride, padding, dilation, groups)
+    return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)
+
+def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
+           padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
+    r"""
+    Applies a 3D convolution over a quantized 3D input composed of several input
+    planes.
+
+    See :class:`~torch.ao.nn.quantized.Conv3d` for details and output shape.
+
+    Args:
+        input: quantized input tensor of shape
+          :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
+        weight: quantized filters of shape
+          :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
+        bias: **non-quantized** bias tensor of shape
+          :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
+        stride: the stride of the convolving kernel. Can be a single number or a
+          tuple `(sD, sH, sW)`. Default: 1
+        padding: implicit paddings on both sides of the input. Can be a
+          single number or a tuple `(padD, padH, padW)`. Default: 0
+        dilation: the spacing between kernel elements. Can be a single number or
+          a tuple `(dD, dH, dW)`. Default: 1
+        groups: split input into groups, :math:`\text{in\_channels}` should be
+          divisible by the number of groups. Default: 1
+        padding_mode: the padding mode to use. Only "zeros" is supported for
+          quantized convolution at the moment. Default: "zeros"
+        scale: quantization scale for the output. Default: 1.0
+        zero_point: quantization zero_point for the output. Default: 0
+        dtype: quantization data type to use. Default: ``torch.quint8``
+
+    Examples::
+
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
+        >>> from torch.ao.nn.quantized import functional as qF
+        >>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
+        >>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
+        >>> bias = torch.randn(8, dtype=torch.float)
+        >>>
+        >>> scale, zero_point = 1.0, 0
+        >>> dtype_inputs = torch.quint8
+        >>> dtype_filters = torch.qint8
+        >>>
+        >>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
+        >>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
+        >>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
+    """  # noqa: E501
+    if padding_mode != 'zeros':
+        raise NotImplementedError("Only zero-padding is supported!")
+    if input.dtype != torch.quint8:
+        raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
+    if weight.dtype != torch.qint8:
+        raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
+    if input.ndim != 5:
+        raise ValueError("Input shape must be `(N, C, D, H, W)`!")
+    stride = _triple(stride)
+    padding = _triple(padding)
+    dilation = _triple(dilation)
+
+    packed_params = torch.ops.quantized.conv3d_prepack(
+        weight, bias, stride, padding, dilation, groups)
+    return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)
+
+def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
+    r"""Down/up samples the input to either the given :attr:`size` or the given
+    :attr:`scale_factor`
+
+    See :func:`torch.nn.functional.interpolate` for implementation details.
+
+    The input dimensions are interpreted in the form:
+    `mini-batch x channels x [optional depth] x [optional height] x width`.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    .. note:: Only 2D/3D input is supported for quantized inputs
+
+    .. note:: Only the following modes are supported for the quantized inputs:
+
+        - `bilinear`
+        - `nearest`
+
+    Args:
+        input (Tensor): the input tensor
+        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
+            output spatial size.
+        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
+        mode (str): algorithm used for upsampling:
+            ``'nearest'`` | ``'bilinear'``
+        align_corners (bool, optional): Geometrically, we consider the pixels of the
+            input and output as squares rather than points.
+            If set to ``True``, the input and output tensors are aligned by the
+            center points of their corner pixels, preserving the values at the corner pixels.
+            If set to ``False``, the input and output tensors are aligned by the corner
+            points of their corner pixels, and the interpolation uses edge value padding
+            for out-of-boundary values, making this operation *independent* of input size
+            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
+            is ``'bilinear'``.
+            Default: ``False``
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.interpolate' must be quantized!")
+    return torch.nn.functional.interpolate(input, size, scale_factor, mode,
+                                           align_corners)
+
+def linear(
+    input: Tensor, weight: Tensor, bias: Optional[Tensor] = None,
+    scale: Optional[float] = None, zero_point: Optional[int] = None
+) -> Tensor:
+    r"""
+    Applies a linear transformation to the incoming quantized data:
+    :math:`y = xA^T + b`.
+    See :class:`~torch.ao.nn.quantized.Linear`
+
+    .. note::
+
+      Current implementation packs weights on every call, which has penalty on performance.
+      If you want to avoid the overhead, use :class:`~torch.ao.nn.quantized.Linear`.
+
+    Args:
+      input (Tensor): Quantized input of type `torch.quint8`
+      weight (Tensor): Quantized weight of type `torch.qint8`
+      bias (Tensor): None or fp32 bias of type `torch.float`
+      scale (double): output scale. If None, derived from the input scale
+      zero_point (long): output zero point. If None, derived from the input zero_point
+
+    Shape:
+        - Input: :math:`(N, *, in\_features)` where `*` means any number of
+          additional dimensions
+        - Weight: :math:`(out\_features, in\_features)`
+        - Bias: :math:`(out\_features)`
+        - Output: :math:`(N, *, out\_features)`
+    """
+    if scale is None:
+        scale = input.q_scale()
+    if zero_point is None:
+        zero_point = input.q_zero_point()
+    _packed_params = torch.ops.quantized.linear_prepack(weight, bias)
+    return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)
+
+def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
+               ceil_mode=False, return_indices=False):
+    r"""Applies a 1D max pooling over a quantized input signal composed of
+    several quantized input planes.
+
+    .. note:: The input quantization parameters are propagated to the output.
+
+    See :class:`~torch.ao.nn.quantized.MaxPool1d` for details.
+    """
+    if return_indices:
+        raise NotImplementedError("return_indices is not yet implemented!")
+    if stride is None:
+        stride = torch.jit.annotate(List[int], [])
+    return torch.nn.functional.max_pool1d(input, kernel_size, stride, padding,
+                                          dilation, ceil_mode=ceil_mode, return_indices=return_indices)
+
+def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
+               ceil_mode=False, return_indices=False):
+    r"""Applies a 2D max pooling over a quantized input signal composed of
+    several quantized input planes.
+
+    .. note:: The input quantization parameters are propagated to the output.
+
+    See :class:`~torch.ao.nn.quantized.MaxPool2d` for details.
+    """
+    if return_indices:
+        raise NotImplementedError("return_indices is not yet implemented!")
+    if stride is None:
+        stride = torch.jit.annotate(List[int], [])
+    return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
+                                          dilation, ceil_mode=ceil_mode, return_indices=return_indices)
+
+def celu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
+    r"""celu(input, scale, zero_point, alpha=1.) -> Tensor
+
+    Applies the quantized CELU function element-wise.
+
+    .. math::
+        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x / \alpha) - 1))
+
+    Args:
+        input: quantized input
+        alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.celu' must be quantized!")
+    return torch.ops.quantized.celu(input, scale, zero_point, alpha)
+
+
+def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False,
+               scale: Optional[float] = None, zero_point: Optional[int] = None):
+    r"""
+    Quantized version of
+    leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
+
+    Applies element-wise,
+    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
+
+    Args:
+        input: Quantized input
+        negative_slope: The slope of the negative input
+        inplace: Inplace modification of the input tensor
+        scale, zero_point: Scale and zero point of the output tensor.
+
+    See :class:`~torch.nn.LeakyReLU` for more details.
+    """
+    if scale is not None and zero_point is not None:
+        assert not inplace, "Cannot rescale with `inplace`"
+        output = torch._empty_affine_quantized(
+            input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype)
+        torch._C._nn.leaky_relu(input, negative_slope, out=output)
+        return output
+    if inplace:
+        result = torch._C._nn.leaky_relu_(input, negative_slope)
+    else:
+        result = torch._C._nn.leaky_relu(input, negative_slope)
+    return result
+
+def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor:
+    r"""This is the quantized version of :func:`~torch.nn.functional.hardtanh`.
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
+    if inplace:
+        return torch._C._nn.hardtanh_(input, min_val, max_val)
+    return torch._C._nn.hardtanh(input, min_val, max_val)
+
+def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:
+    r"""This is the quantized version of :func:`~torch.nn.functional.hardswish`.
+
+    Args:
+        input: quantized input
+        scale: quantization scale of the output tensor
+        zero_point: quantization zero point of the output tensor
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.hardswish' must be quantized!")
+    return torch._ops.ops.quantized.hardswish(input, scale, zero_point)
+
+def threshold(input: Tensor, threshold: float, value: float) -> Tensor:
+    r"""Applies the quantized version of the threshold function element-wise:
+
+    .. math::
+        x = \begin{cases}
+                x & \text{if~} x > \text{threshold} \\
+                \text{value} & \text{otherwise}
+            \end{cases}
+
+    See :class:`~torch.nn.Threshold` for more details.
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.threshold' must be quantized!")
+    if threshold is None:
+        raise ValueError("Input to 'threshold' must be specified!")
+    if value is None:
+        raise ValueError("Input to 'value' must be specified!")
+    return torch._ops.ops.quantized.threshold(input, threshold, value)
+
+def elu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
+    r"""This is the quantized version of :func:`~torch.nn.functional.elu`.
+
+    Args:
+        input: quantized input
+        scale: quantization scale of the output tensor
+        zero_point: quantization zero point of the output tensor
+        alpha: the alpha constant
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.elu' must be quantized!")
+    return torch.ops.quantized.elu(input, scale, zero_point, alpha)
+
+def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
+    r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`.
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
+    if inplace:
+        return torch._C._nn.hardsigmoid_(input)  # type: ignore[attr-defined]
+    return torch._C._nn.hardsigmoid(input)
+
+def clamp(input: Tensor, min_: float, max_: float) -> Tensor:
+    r"""clamp(input, min\_, max\_) -> Tensor
+
+    Applies the clamp function element-wise.
+    See :class:`~torch.ao.nn.quantized.clamp` for more details.
+
+    Args:
+        input: quantized input
+        min_: minimum value for clamping
+        max_: maximum value for clamping
+    """
+    if not input.is_quantized:
+        raise ValueError("Input to 'quantized.clamp' must be quantized!")
+    return torch.clamp(input, min_, max_)
+
+def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
+    r"""Upsamples the input to either the given :attr:`size` or the given
+    :attr:`scale_factor`
+
+    .. warning::
+        This function is deprecated in favor of
+        :func:`torch.ao.nn.quantized.functional.interpolate`.
+        This is equivalent with ``nn.quantized.functional.interpolate(...)``.
+
+    See :func:`torch.nn.functional.interpolate` for implementation details.
+
+    The input dimensions are interpreted in the form:
+    `mini-batch x channels x [optional depth] x [optional height] x width`.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    .. note:: Only 2D input is supported for quantized inputs
+
+    .. note:: Only the following modes are supported for the quantized inputs:
+
+        - `bilinear`
+        - `nearest`
+
+    Args:
+        input (Tensor): quantized input tensor
+        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
+            output spatial size.
+        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to be an integer.
+        mode (str): algorithm used for upsampling:
+            ``'nearest'`` | ``'bilinear'``
+        align_corners (bool, optional): Geometrically, we consider the pixels of the
+            input and output as squares rather than points.
+            If set to ``True``, the input and output tensors are aligned by the
+            center points of their corner pixels, preserving the values at the corner pixels.
+            If set to ``False``, the input and output tensors are aligned by the corner
+            points of their corner pixels, and the interpolation uses edge value padding
+            for out-of-boundary values, making this operation *independent* of input size
+            when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
+            is ``'bilinear'``.
+            Default: ``False``
+
+    .. warning::
+        With ``align_corners = True``, the linearly interpolating modes
+        (`bilinear`) don't proportionally align the
+        output and input pixels, and thus the output values can depend on the
+        input size. This was the default behavior for these modes up to version
+        0.3.1. Since then, the default behavior is ``align_corners = False``.
+        See :class:`~torch.nn.Upsample` for concrete examples on how this
+        affects the outputs.
+    """
+    warnings.warn("nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.")
+    return interpolate(input, size, scale_factor, mode, align_corners)
+
+def upsample_bilinear(input, size=None, scale_factor=None):
+    r"""Upsamples the input, using bilinear upsampling.
+
+    .. warning::
+        This function is deprecated in favor of
+        :func:`torch.ao.nn.quantized.functional.interpolate`.
+        This is equivalent with
+        ``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    .. note:: Only 2D inputs are supported
+
+    Args:
+        input (Tensor): quantized input
+        size (int or Tuple[int, int]): output spatial size.
+        scale_factor (int or Tuple[int, int]): multiplier for spatial size
+    """
+    # DeprecationWarning is ignored by default
+    warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.")
+    return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
+
+def upsample_nearest(input, size=None, scale_factor=None):
+    r"""Upsamples the input, using nearest neighbours' pixel values.
+
+    .. warning::
+        This function is deprecated in favor of
+        :func:`torch.ao.nn.quantized.functional.interpolate`.
+        This is equivalent with ``nn.quantized.functional.interpolate(..., mode='nearest')``.
+
+    .. note:: The input quantization parameters propagate to the output.
+
+    .. note:: Only 2D inputs are supported
+
+    Args:
+        input (Tensor): quantized input
+        size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
+            size.
+        scale_factor (int): multiplier for spatial size. Has to be an integer.
+    """
+    # DeprecationWarning is ignored by default
+    warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.")
+    return interpolate(input, size, scale_factor, mode='nearest')
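As the note on `linear` above says, the functional form re-packs the weight on every call. A short usage sketch contrasting it with the module form (eager-mode quantization; assumes a PyTorch build with a quantized engine such as fbgemm or qnnpack available):

import torch
from torch.ao.nn.quantized import functional as qF

x = torch.randn(4, 8)
w = torch.randn(16, 8)
b = torch.randn(16)

# Activations are quint8 and weights qint8, matching the dtype checks
# enforced by conv1d/conv2d/conv3d above.
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
qw = torch.quantize_per_tensor(w, scale=0.05, zero_point=0, dtype=torch.qint8)

# Functional form: linear_prepack runs again on every call.
qy = qF.linear(qx, qw, b, scale=0.2, zero_point=0)
print(qy.shape, qy.dtype)  # torch.Size([4, 16]) torch.quint8

For repeated inference over the same weights, `torch.ao.nn.quantized.Linear` packs once at construction and avoids that per-call cost.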
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__init__.py
ADDED
@@ -0,0 +1,132 @@
+# mypy: allow-untyped-defs
+import torch
+
+# The quantized modules use `torch.nn` and `torch.ao.nn.quantizable`
+# packages. However, the `quantizable` package uses "lazy imports"
+# to avoid circular dependency.
+# Hence we need to include it here to make sure it is resolved before
+# they are used in the modules.
+import torch.ao.nn.quantizable
+
+from torch.nn.modules.pooling import MaxPool2d
+
+from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU
+from .dropout import Dropout
+from .batchnorm import BatchNorm2d, BatchNorm3d
+from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
+    InstanceNorm2d, InstanceNorm3d
+from .conv import Conv1d, Conv2d, Conv3d
+from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
+from .linear import Linear
+from .embedding_ops import Embedding, EmbeddingBag
+from .rnn import LSTM
+
+from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
+
+__all__ = [
+    'BatchNorm2d',
+    'BatchNorm3d',
+    'Conv1d',
+    'Conv2d',
+    'Conv3d',
+    'ConvTranspose1d',
+    'ConvTranspose2d',
+    'ConvTranspose3d',
+    'DeQuantize',
+    'ELU',
+    'Embedding',
+    'EmbeddingBag',
+    'GroupNorm',
+    'Hardswish',
+    'InstanceNorm1d',
+    'InstanceNorm2d',
+    'InstanceNorm3d',
+    'LayerNorm',
+    'LeakyReLU',
+    'Linear',
+    'LSTM',
+    'MultiheadAttention',
+    'Quantize',
+    'ReLU6',
+    'Sigmoid',
+    'Softmax',
+    'Dropout',
+    'PReLU',
+    # Wrapper modules
+    'FloatFunctional',
+    'FXFloatFunctional',
+    'QFunctional',
+]
+
+class Quantize(torch.nn.Module):
+    r"""Quantizes an incoming tensor
+
+    Args:
+     `scale`: scale of the output Quantized Tensor
+     `zero_point`: zero_point of output Quantized Tensor
+     `dtype`: data type of output Quantized Tensor
+     `factory_kwargs`: Dictionary of kwargs used for configuring initialization
+         of internal buffers. Currently, `device` and `dtype` are supported.
+         Example: `factory_kwargs={'device': 'cuda', 'dtype': torch.float64}`
+         will initialize internal buffers as type `torch.float64` on the current CUDA device.
+         Note that `dtype` only applies to floating-point buffers.
+
+    Examples::
+        >>> t = torch.tensor([[1., -1.], [1., -1.]])
+        >>> scale, zero_point, dtype = 1.0, 2, torch.qint8
+        >>> qm = Quantize(scale, zero_point, dtype)
+        >>> # xdoctest: +SKIP
+        >>> qt = qm(t)
+        >>> print(qt)
+        tensor([[ 1., -1.],
+                [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
+    """
+
+    scale: torch.Tensor
+    zero_point: torch.Tensor
+
+    def __init__(self, scale, zero_point, dtype, factory_kwargs=None):
+        factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
+        super().__init__()
+        self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))
+        self.register_buffer('zero_point',
+                             torch.tensor([zero_point], dtype=torch.long,
+                                          **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
+        self.dtype = dtype
+
+    def forward(self, X):
+        return torch.quantize_per_tensor(X, float(self.scale),
+                                         int(self.zero_point), self.dtype)
+
+    @staticmethod
+    def from_float(mod, use_precomputed_fake_quant=False):
+        assert hasattr(mod, 'activation_post_process')
+        scale, zero_point = mod.activation_post_process.calculate_qparams()
+        return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)
+
+    def extra_repr(self):
+        return f'scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}'
+
+
+class DeQuantize(torch.nn.Module):
+    r"""Dequantizes an incoming tensor
+
+    Examples::
+        >>> input = torch.tensor([[1., -1.], [1., -1.]])
+        >>> scale, zero_point, dtype = 1.0, 2, torch.qint8
+        >>> qm = Quantize(scale, zero_point, dtype)
+        >>> # xdoctest: +SKIP
+        >>> quantized_input = qm(input)
+        >>> dqm = DeQuantize()
+        >>> dequantized = dqm(quantized_input)
+        >>> print(dequantized)
+        tensor([[ 1., -1.],
+                [ 1., -1.]], dtype=torch.float32)
+    """
+
+    def forward(self, Xq):
+        return Xq.dequantize()
+
+    @staticmethod
+    def from_float(mod, use_precomputed_fake_quant=False):
+        return DeQuantize()
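A round-trip sketch for the `Quantize` / `DeQuantize` wrapper modules defined above (standard eager-mode API; with scale 1.0 the example values survive exactly):

import torch
from torch.ao.nn.quantized import Quantize, DeQuantize

t = torch.tensor([[1.0, -1.0], [1.0, -1.0]])
quant = Quantize(scale=1.0, zero_point=2, dtype=torch.qint8)
dequant = DeQuantize()

qt = quant(t)        # quantized tensor, dtype=torch.qint8, zero_point=2
back = dequant(qt)   # float tensor recovered via Xq.dequantize()
assert torch.equal(back, t)

These two modules are what `torch.ao.quantization.convert` swaps in for the `QuantStub` / `DeQuantStub` placeholders in an eager-mode model.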
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc
ADDED
Binary file (4.01 kB)

parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/conv.cpython-310.pyc
ADDED
Binary file (31.3 kB)

parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc
ADDED
Binary file (11.1 kB)

parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/linear.cpython-310.pyc
ADDED
Binary file (9.63 kB)

parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc
ADDED
Binary file (6.85 kB)
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/activation.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from warnings import warn
|
| 4 |
+
__all__ = [
|
| 5 |
+
"ReLU6",
|
| 6 |
+
"Hardswish",
|
| 7 |
+
"ELU",
|
| 8 |
+
"LeakyReLU",
|
| 9 |
+
"Sigmoid",
|
| 10 |
+
"Softmax",
|
| 11 |
+
"MultiheadAttention",
|
| 12 |
+
"PReLU"
|
| 13 |
+
]
|
| 14 |
+
|
| 15 |
+
class ReLU6(torch.nn.ReLU):
|
| 16 |
+
r"""Applies the element-wise function:
|
| 17 |
+
|
| 18 |
+
:math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
|
| 19 |
+
zero_point, and :math:`q(6)` is the quantized representation of number 6.
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
inplace: can optionally do the operation in-place. Default: ``False``
|
| 23 |
+
|
| 24 |
+
Shape:
|
| 25 |
+
- Input: :math:`(N, *)` where `*` means, any number of additional
|
| 26 |
+
dimensions
|
| 27 |
+
- Output: :math:`(N, *)`, same shape as the input
|
| 28 |
+
|
| 29 |
+
.. image:: ../scripts/activation_images/ReLU6.png
|
| 30 |
+
|
| 31 |
+
Examples::
|
| 32 |
+
|
| 33 |
+
>>> m = nn.quantized.ReLU6()
|
| 34 |
+
>>> input = torch.randn(2)
|
| 35 |
+
>>> # xdoctest: +SKIP
|
| 36 |
+
>>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
|
| 37 |
+
>>> output = m(input)
|
| 38 |
+
"""
|
| 39 |
+
def __init__(self, inplace=False):
|
| 40 |
+
super().__init__(inplace)
|
| 41 |
+
self.inplace = inplace
|
| 42 |
+
|
| 43 |
+
def forward(self, input):
|
| 44 |
+
return torch.ops.quantized.relu6(input, self.inplace)
|
| 45 |
+
|
| 46 |
+
def _get_name(self):
|
| 47 |
+
return 'QuantizedReLU6'
|
| 48 |
+
|
| 49 |
+
@staticmethod
|
| 50 |
+
def from_float(mod, use_precomputed_fake_quant=False):
|
| 51 |
+
return ReLU6(mod.inplace)
|
| 52 |
+
|
class Hardswish(torch.nn.Hardswish):
    r"""This is the quantized version of :class:`~torch.nn.Hardswish`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
    """
    def __init__(self, scale, zero_point, device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
        self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))

    def forward(self, input):
        return torch.ops.quantized.hardswish(input, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedHardswish'

    @staticmethod
    def from_float(mod, use_precomputed_fake_quant=False):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return Hardswish(float(scale), int(zero_point))

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point))

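# Usage sketch for the pattern above (an illustrative comment, not part of the
# original file; the qparams below are arbitrary placeholders): output-scaled
# activations such as Hardswish carry their own scale/zero_point and
# requantize their result, so in practice they are built from observer
# statistics via from_float rather than constructed by hand:
#
#     >>> qx = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, torch.quint8)
#     >>> m = Hardswish(scale=0.05, zero_point=64)
#     >>> qy = m(qx)  # output tensor uses scale=0.05, zero_point=64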
class ELU(torch.nn.ELU):
    r"""This is the quantized equivalent of :class:`~torch.nn.ELU`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        alpha: the alpha constant
    """
    def __init__(self, scale, zero_point, alpha=1.):
        super().__init__(alpha)
        self.scale = scale
        self.zero_point = zero_point

    def forward(self, input):
        return torch.ao.nn.quantized.functional.elu(
            input, self.scale, self.zero_point, self.alpha)

    def _get_name(self):
        return 'QuantizedELU'

    @staticmethod
    def from_float(mod, use_precomputed_fake_quant=False):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return ELU(float(scale), int(zero_point), mod.alpha)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point), mod.alpha)

class LeakyReLU(torch.nn.LeakyReLU):
    r"""This is the quantized equivalent of :class:`~torch.nn.LeakyReLU`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        negative_slope: Controls the angle of the negative slope. Default: 1e-2
    """
    def __init__(self, scale: float, zero_point: int, negative_slope: float = 1e-2,
                 inplace: bool = False, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(negative_slope, inplace)
        self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
        self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))

    def forward(self, input):
        return torch.ops.quantized.leaky_relu(
            input, self.negative_slope, self.inplace, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedLeakyReLU'

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)

class Sigmoid(torch.nn.Sigmoid):
    r"""This is the quantized equivalent of :class:`~torch.nn.Sigmoid`.

    Args:
        output_scale: quantization scale of the output tensor
        output_zero_point: quantization zero point of the output tensor
    """

    def __init__(self, output_scale: float, output_zero_point: int):
        super().__init__()
        self.output_scale = output_scale
        self.output_zero_point = output_zero_point

    def forward(self, input):
        return torch.ops.quantized.sigmoid(input, self.output_scale, self.output_zero_point)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        output_scale, output_zero_point = mod.activation_post_process.calculate_qparams()
        return cls(float(output_scale), int(output_zero_point))

class Softmax(torch.nn.Softmax):
    r"""This is the quantized version of :class:`~torch.nn.Softmax`.

    Args:
        dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1).
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
    """
    def __init__(self, dim=None, scale=1.0, zero_point=0):
        super().__init__()
        self.dim = dim
        self.scale = scale
        self.zero_point = zero_point

    def forward(self, input):
        dim = self.dim
        if dim is None:
            stacklevel = 3
            # Note: adding the mypy ignore on _get_softmax_dim seems less bad
            # than making `_get_softmax_dim` an official API.
            dim = torch.nn.functional._get_softmax_dim(  # type: ignore[attr-defined]
                "softmax", input.dim(), stacklevel)
        return torch.ops.quantized.softmax(
            input, dim, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedSoftmax'

    @staticmethod
    def from_float(mod, use_precomputed_fake_quant=False):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return Softmax(mod.dim, float(scale), int(zero_point))

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(mod.dim, float(scale), int(zero_point))


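# Note on the dim=None path above (a descriptive comment, not part of the
# original file): _get_softmax_dim reproduces the legacy implicit-dim rule,
# e.g. picking dim=1 for a 2D (N, C) input and dim=0 for a 1D input, along
# with a warning; passing dim explicitly, e.g. Softmax(dim=1, scale=s,
# zero_point=z), avoids the implicit choice.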
class MultiheadAttention(torch.ao.nn.quantizable.MultiheadAttention):
    _FLOAT_MODULE = torch.ao.nn.quantizable.MultiheadAttention

    def _get_name(self):
        return "QuantizedMultiheadAttention"

    @classmethod
    def from_float(cls, other):
        # The whole flow is float -> observed -> quantized
        # This class does observed -> quantized only
        raise NotImplementedError("It looks like you are trying to convert a "
                                  "non-observed MHA module. Please, see "
                                  "the examples on quantizable MHAs.")

    @classmethod
    def from_observed(cls, other):
        converted = torch.ao.quantization.convert(other, mapping=None,
                                                  inplace=False,
                                                  remove_qconfig=True,
                                                  convert_custom_config_dict=None)
        converted.__class__ = cls
        # Remove the parameters for the bias_k and bias_v to quantize them
        # TODO: This is a potential source of accuracy drop.
        #       quantized cat takes the scale and zp of the first
        #       element, which might lose the precision in the bias_k
        #       and the bias_v (which are cat'ed with k/v being first).
        if converted.bias_k is not None:
            bias_k = converted._parameters.pop('bias_k')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
            setattr(converted, 'bias_k', bias_k)  # noqa: B010

        if converted.bias_v is not None:
            bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,  # type: ignore[possibly-undefined]
                                                      reduce_range=False)
            bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
            setattr(converted, 'bias_v', bias_v)  # noqa: B010

        del converted.in_proj_weight
        del converted.in_proj_bias

        return converted

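# Flow note for the class above (a descriptive comment, not part of the
# original file): the supported path is float -> observed -> quantized. A
# float nn.MultiheadAttention is first swapped for the observed
# torch.ao.nn.quantizable.MultiheadAttention through the custom-module
# support in torch.ao.quantization.prepare, calibrated on sample data, and
# only then converted here via from_observed; calling from_float directly
# raises by design, as shown above.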
class PReLU(torch.nn.Module):
    r"""This is the quantized equivalent of :class:`~torch.nn.PReLU`.

    Args:
        output_scale: quantization scale of the output tensor
        output_zero_point: quantization zero point of the output tensor
        num_parameters: number of parameters: 1, or the number of channels at input. Default: 1
    """
    def __init__(self, output_scale: float, output_zero_point: int,
                 num_parameters: int = 1) -> None:
        super().__init__()
        self.num_parameters = num_parameters
        self.scale = output_scale
        self.zero_point = output_zero_point
        w = torch.randn(num_parameters, dtype=torch.float)
        qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.quint8)
        self.set_weight(qw)

    def set_weight(self, w: torch.Tensor) -> None:
        self.weight = w

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return torch.ops.quantized.prelu(input, self.weight, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedPReLU'

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
        float_wt = mod.weight.float()
        observer = mod.qconfig.weight()
        observer(float_wt)
        if observer.dtype != torch.quint8:
            warn(
                f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}"
            )
        wt_scale, wt_zp = observer.calculate_qparams()
        qweight = torch.quantize_per_tensor(
            float_wt, float(wt_scale), int(wt_zp), torch.quint8)
        qprelu.set_weight(qweight)
        return qprelu

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
        float_wt = mod.weight.float()
        observer = mod.qconfig.weight()
        observer(float_wt)
        if observer.dtype != torch.quint8:
            warn(
                f"PReLU's weight observer should have dtype quint8 but got {observer.dtype}"
            )
        wt_scale, wt_zp = observer.calculate_qparams()
        qweight = torch.quantize_per_tensor(
            float_wt, float(wt_scale), int(wt_zp), torch.quint8)
        qprelu.set_weight(qweight)
        return qprelu
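All of the activations above share one pattern: they take an already-quantized
input and requantize their output with a module-held scale and zero_point. A
minimal end-to-end sketch (not part of the diff itself; the qparams are
arbitrary placeholders and it assumes a PyTorch build with the fbgemm or
qnnpack quantized engine):

    import torch
    import torch.ao.nn.quantized as nnq

    x = torch.randn(2, 8)
    qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=128, dtype=torch.quint8)

    act = nnq.LeakyReLU(scale=0.05, zero_point=64, negative_slope=0.01)
    qy = act(qx)         # output carries the module's scale/zero_point
    y = qy.dequantize()  # back to float for inspection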
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/conv.py
ADDED
|
@@ -0,0 +1,946 @@
# mypy: allow-untyped-defs
r"""Quantized convolution modules."""

from typing import Optional, List, TypeVar

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.intrinsic.qat as nniqat

from torch._ops import ops
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.utils import fuse_conv_bn_weights

from .utils import _quantize_weight, WeightedQuantizedModule

__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']

_SUPPORTED_PADDING = {
    'zeros',
    'reflect'
}


def _reverse_repeat_padding(padding: List[int]) -> List[int]:
    _reversed_padding_repeated_twice: List[int] = []
    N = len(padding)
    for idx in range(N):
        for _ in range(2):
            _reversed_padding_repeated_twice.append(padding[N - idx - 1])
    return _reversed_padding_repeated_twice


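# Worked example for the helper above (a descriptive comment, not part of the
# original file): for a Conv2d padding of (1, 2) it returns [2, 2, 1, 1], the
# per-dimension pads reversed and each repeated twice, which is exactly the
# last-dimension-first layout that torch.nn.functional.pad expects.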
class _ConvNd(WeightedQuantizedModule):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        # All subclasses have this signature - See PR #49702
        raise NotImplementedError

    def _init(self, in_channels, out_channels, kernel_size, stride,
              padding, dilation,
              transposed, output_padding,
              groups, bias,
              padding_mode='zeros',
              device=None,
              dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()

        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        if padding_mode not in _SUPPORTED_PADDING:
            raise ValueError(f"'padding_mode' {padding_mode} is not supported by quantized convolution")
        self.padding_mode = padding_mode
        # Initialize as NCHW. set_weight will internally transpose to NHWC.
        if self.transposed:
            weight_shape = [in_channels, out_channels // self.groups]
        else:
            weight_shape = [out_channels, in_channels // self.groups]
        qweight = torch._empty_affine_quantized(
            weight_shape + list(kernel_size),
            scale=1, zero_point=0, dtype=torch.qint8,
            **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
        bias_float = (
            torch.zeros(out_channels, dtype=torch.float,
                        **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)

        self.set_weight_bias(qweight, bias_float)
        self.scale = 1.0
        self.zero_point = 0

    def set_weight_bias(self, qweight, bias_float):
        raise NotImplementedError

    def bias(self):
        raise NotImplementedError

    def _weight_bias(self):
        raise NotImplementedError

    def extra_repr(self):
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}, scale={scale}, zero_point={zero_point}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias() is None:
            s += ', bias=False'
        return s.format(**self.__dict__)

    # ===== Serialization methods =====
    # The special consideration here is that we have to unpack the weights into
    # their regular QTensor form for serialization. Packed weights should not
    # live outside the process in which they were created, rather they should be
    # derived from the QTensor weight.
    # self
    # |--- weight : Tensor
    # |--- bias : Tensor
    #
    # TODO: maybe change to this when https://github.com/pytorch/pytorch/pull/32958 is landed
    # self
    # |--- _packed_params : Conv2dPackedParamsBase or Conv3dPackedParamsBase
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        (w, b) = self._weight_bias()
        destination[prefix + 'weight'] = w
        destination[prefix + 'bias'] = b
        destination[prefix + 'scale'] = torch.tensor(self.scale)
        destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)

    @torch.jit.export
    def __getstate__(self):
        (w, b) = self._weight_bias()
        return (
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.transposed,
            self.output_padding,
            self.groups,
            self.padding_mode,
            w,
            b,
            self.scale,
            self.zero_point,
            self.training
        )

    # ===== Deserialization methods =====
    # Counterpart to the serialization methods, we must pack the serialized
    # QTensor weight into its packed format for use by the FBGEMM ops.
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        self.set_weight_bias(
            state_dict[prefix + 'weight'], state_dict[prefix + 'bias'])
        state_dict.pop(prefix + 'weight')
        state_dict.pop(prefix + 'bias')
        self.scale = float(state_dict[prefix + 'scale'])
        state_dict.pop(prefix + 'scale')
        self.zero_point = int(state_dict[prefix + 'zero_point'])
        state_dict.pop(prefix + 'zero_point')
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, False, missing_keys,
            unexpected_keys, error_msgs)

    @torch.jit.export
    def __setstate__(self, state):
        self.in_channels = state[0]
        self.out_channels = state[1]
        self.kernel_size = state[2]
        self.stride = state[3]
        self.padding = state[4]
        self.dilation = state[5]
        self.transposed = state[6]
        self.output_padding = state[7]
        self.groups = state[8]
        self.padding_mode = state[9]
        self.set_weight_bias(state[10], state[11])
        self.scale = state[12]
        self.zero_point = state[13]
        self.training = state[14]

    def __deepcopy__(self, memo):
        new_instance = type(self).__new__(type(self))
        torch.nn.Module.__init__(new_instance)
        state = self.__getstate__()
        new_instance.__setstate__(state)
        return new_instance

    def __copy__(self):
        return self.__deepcopy__({})

    @classmethod
    def get_qconv(cls, mod, activation_post_process, weight_post_process=None):
        r"""Creates a qconv object and returns it.
        """
        if weight_post_process is None:
            weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        assert weight_post_process.dtype == torch.qint8, \
            'Weight observer must have a dtype of qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvNd
        qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
                    mod.stride, mod.padding, mod.dilation, mod.groups,
                    mod.bias is not None, mod.padding_mode)
        qconv.set_weight_bias(qweight, mod.bias)
        if activation_post_process is None or activation_post_process.dtype == torch.float:
            return qconv  # dynamic quantization doesn't need scale/zero_point
        else:
            act_scale, act_zp = activation_post_process.calculate_qparams()
            qconv.scale = float(act_scale)
            qconv.zero_point = int(act_zp)
            return qconv

    @staticmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        if hasattr(mod, "weight_fake_quant"):
            # assert type(mod) == cls.__QAT_MODULE, " nnq." + cls.__name__ + \
            #     ".from_float only works for " + cls.__QAT_MODULE.__name__
            if type(mod) == cls._NNIQAT_CONV_BN_MODULE:
                mod.weight, mod.bias = fuse_conv_bn_weights(
                    mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                    mod.bn.eps, mod.bn.weight, mod.bn.bias)
            assert hasattr(mod, "activation_post_process"), \
                "Input QAT module must have observer attached"
            weight_post_process = mod.weight_fake_quant
            activation_post_process = mod.activation_post_process
        else:
            assert type(mod) == cls._FLOAT_MODULE, \
                " nnq." + cls.__name__ + ".from_float only works for " + \
                cls._FLOAT_MODULE.__name__ + " but got:" + str(type(mod))
            assert hasattr(mod, "qconfig"), \
                "Input float module must have qconfig defined."
            activation_post_process = None if not hasattr(
                mod, "activation_post_process") else mod.activation_post_process
            if type(mod) in [cls._NNI_CONV_RELU_MODULE, cls._NNI_CONV_ADD_MODULE, cls._NNI_CONV_ADD_RELU_MODULE]:
                mod = mod[0]
            weight_post_process = mod.qconfig.weight()
        return cls.get_qconv(mod, activation_post_process, weight_post_process)

    @classmethod
    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
        r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
        Args:
            ref_qconv (Module): a reference quantized module, either produced by torch.ao.quantization
                                utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        """
        qconv = cls(
            ref_qconv.in_channels,
            ref_qconv.out_channels,
            ref_qconv.kernel_size,  # type: ignore[arg-type]
            ref_qconv.stride,  # type: ignore[arg-type]
            ref_qconv.padding,  # type: ignore[arg-type]
            ref_qconv.dilation,  # type: ignore[arg-type]
            ref_qconv.groups,
            ref_qconv.bias is not None,  # type: ignore[arg-type]
            ref_qconv.padding_mode,
            device=ref_qconv.weight.device,
            dtype=ref_qconv.weight.dtype)
        qweight = ref_qconv.get_quantized_weight()
        qconv.set_weight_bias(qweight, ref_qconv.bias)
        qconv.scale = float(output_scale)
        qconv.zero_point = int(output_zero_point)
        return qconv


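# Serialization note for _ConvNd above (a descriptive comment, not part of the
# original file): a state_dict round trip unpacks and re-packs the weight, e.g.
#
#     >>> sd = qconv.state_dict()    # plain quantized 'weight'/'bias' plus 'scale'/'zero_point'
#     >>> other.load_state_dict(sd)  # re-packs via set_weight_bias on load
#
# so checkpoints stay portable even though the in-memory packed format is
# backend-specific.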
class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv1d`.

    .. note::
        Only `zeros` and `reflect` are supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv1d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 100)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
        ...                                     dtype=torch.quint8)
        >>> output = m(q_input)

    """

    _FLOAT_MODULE = nn.Conv1d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn1d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU1d
    _NNI_CONV_ADD_MODULE: None = None
    _NNI_CONV_ADD_RELU_MODULE: None = None

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: _size_1_t,
                 stride: _size_1_t = 1,
                 padding: _size_1_t = 0,
                 dilation: _size_1_t = 1,
                 groups: int = 1,
                 bias: bool = True,
                 padding_mode: str = 'zeros',
                 device=None,
                 dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = padding if isinstance(padding, str) else _single(padding)
        dilation = _single(dilation)

        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _single(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv1d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv1d_prepack(
                w, b, self.stride, _pair(0), self.dilation,
                self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != 'zeros':
            # Padding in Conv1d is stored as (p, p), need to get (p,)
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv1d(input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)


class Conv2d(_ConvNd):
    r"""Applies a 2D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv2d`.

    .. note::
        Only `zeros` and `reflect` are supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv2d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """
    _FLOAT_MODULE = nn.Conv2d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn2d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU2d
    _NNI_CONV_ADD_MODULE = nni.ConvAdd2d
    _NNI_CONV_ADD_RELU_MODULE = nni.ConvAddReLU2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv2d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv2d_prepack(
                w, b, self.stride, _pair(0), self.dilation, self.groups)

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv2d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)


class Conv3d(_ConvNd):
    r"""Applies a 3D convolution over a quantized input signal composed of
    several quantized input planes.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv3d`.

    .. note::
        Only `zeros` is supported for the :attr:`padding_mode` argument.

    .. note::
        Only `torch.quint8` is supported for the input data type.


    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv3d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
        >>> input = torch.randn(20, 16, 56, 56, 56)
        >>> # quantize input to quint8
        >>> # xdoctest: +SKIP
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)

    """
    _FLOAT_MODULE = nn.Conv3d
    _NNIQAT_CONV_BN_MODULE = nniqat.ConvBn3d
    _NNI_CONV_RELU_MODULE = nni.ConvReLU3d
    _NNI_CONV_ADD_MODULE: None = None
    _NNI_CONV_ADD_RELU_MODULE: None = None

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None):
        assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _triple(0), groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConv3d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        if self.padding_mode == 'zeros':
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, self.padding, self.dilation, self.groups)
        else:
            self._packed_params = torch.ops.quantized.conv3d_prepack(
                w, b, self.stride, _triple(0), self.dilation, self.groups)

    def _weight_bias(self):
        return self._packed_params.unpack()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return ops.quantized.conv3d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by the user
        """
        return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)

# === Transposed Convolutions ===
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)


class _ConvTransposeNd(_ConvNd):

    _FLOAT_MODULE = MOD

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode, device=None, dtype=None):
        if padding_mode != 'zeros':
            raise ValueError(f'Only "zeros" padding mode is supported for {self.__class__.__name__}')
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
        super()._init(
            in_channels, out_channels, kernel_size, stride,
            padding, dilation, transposed, output_padding,
            groups, bias, padding_mode, **factory_kwargs)

    def _input_padding(self, kernel_size: List[int], dilation: List[int], padding: List[int]) -> List[int]:
        res = torch.jit.annotate(List[int], [])
        for kdx in range(len(kernel_size)):
            pad = (dilation[kdx] * (kernel_size[kdx] - 1) - padding[kdx])
            res.append(pad)
        return res

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Creates a quantized module from a float module or qparams_dict.
        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by the user
        """
        # derived classes override cls._FLOAT_MODULE attribute
        msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + \
              cls._FLOAT_MODULE.__name__  # type: ignore[attr-defined]
        assert type(mod) == cls._FLOAT_MODULE, msg
        assert hasattr(mod, 'qconfig'), \
            'Input float module must have qconfig defined.'
        weight_post_process = mod.qconfig.weight()
        weight_post_process(mod.weight)
        assert weight_post_process.dtype == torch.qint8, \
            'Weight observer must have a dtype of qint8'
        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
        # the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
        qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,  # type: ignore[call-arg]
                    mod.stride, mod.padding, mod.output_padding, mod.groups,
                    mod.bias is not None, mod.dilation, mod.padding_mode)
        qconv.set_weight_bias(qweight, mod.bias)
        if not hasattr(mod, "activation_post_process") or mod.activation_post_process.dtype == torch.float:
            return qconv  # dynamic quantization doesn't need scale/zero_point
        else:
            act_scale, act_zp = mod.activation_post_process.calculate_qparams()
            qconv.scale = float(act_scale)
            qconv.zero_point = int(act_zp)
            return qconv

    @staticmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
        Args:
            ref_qconvt (Module): a reference quantized module, either produced by torch.ao.quantization
                                 utilities or provided by the user
            output_scale (float): scale for output Tensor
            output_zero_point (int): zero point for output Tensor
        """
        qconv = cls(
            ref_qconvt.in_channels,
            ref_qconvt.out_channels,
            ref_qconvt.kernel_size,  # type: ignore[arg-type]
            ref_qconvt.stride,  # type: ignore[arg-type]
            ref_qconvt.padding,  # type: ignore[arg-type]
            ref_qconvt.output_padding,  # type: ignore[arg-type]
            ref_qconvt.groups,
            ref_qconvt.bias is not None,  # type: ignore[arg-type]
            ref_qconvt.dilation,  # type: ignore[arg-type]
            ref_qconvt.padding_mode,
            device=ref_qconvt.weight.device,
            dtype=ref_qconvt.weight.dtype)
        qweight = ref_qconvt.get_quantized_weight()
        qconv.set_weight_bias(qweight, ref_qconvt.bias)
        qconv.scale = float(output_scale)
        qconv.zero_point = int(output_zero_point)
        return qconv


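# Note on _input_padding above (a descriptive comment, not part of the
# original file): per spatial dimension it computes
# dilation * (kernel_size - 1) - padding, the implicit input padding a
# transposed convolution applies; e.g. kernel_size=3, dilation=1, padding=1
# gives 1 * (3 - 1) - 1 = 1.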
class ConvTranspose1d(_ConvTransposeNd):
    r"""Applies a 1D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose1d`.

    .. note:: Currently only the QNNPACK engine is implemented.
        Please set `torch.backends.quantized.engine = 'qnnpack'`

    For special notes, see :class:`~torch.ao.nn.quantized.Conv1d`

    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose1d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> from torch.ao.nn import quantized as nnq
        >>> # With a scalar kernel size and stride
        >>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2)
        >>> # larger kernel with stride and padding
        >>> m = nnq.ConvTranspose1d(16, 33, 5, stride=2, padding=4)
        >>> input = torch.randn(20, 16, 50)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv1d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12])
    """

    _FLOAT_MODULE = nn.ConvTranspose1d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True,
                 dilation=1, padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        output_padding = _single(output_padding)

        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConvTranspose1d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
            w, b, self.stride, self.padding, self.output_padding, self.dilation,
            self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv_transpose1d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        return torch.ops.quantized.conv_transpose1d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)


class ConvTranspose2d(_ConvTransposeNd):
    r"""Applies a 2D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose2d`.

    For special notes, see :class:`~torch.ao.nn.quantized.Conv2d`

    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose2d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> # QNNPACK or FBGEMM as backend
        >>> torch.backends.quantized.engine = 'qnnpack'
        >>> # With square kernels and equal stride
        >>> import torch.ao.nn.quantized as nnq
        >>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    _FLOAT_MODULE = nn.ConvTranspose2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True,
                 dilation=1, padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)

        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConvTranspose2d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
            w, b, self.stride, self.padding, self.output_padding, self.dilation,
            self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv2d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        return ops.quantized.conv_transpose2d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)


class ConvTranspose3d(_ConvTransposeNd):
    r"""Applies a 3D transposed convolution operator over an input image
    composed of several input planes.
    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose3d`.

    .. note:: Currently only the FBGEMM engine is implemented.
        Please set `torch.backends.quantized.engine = 'fbgemm'`

    For special notes, see :class:`~torch.ao.nn.quantized.Conv3d`

    Attributes:
        weight (Tensor): packed tensor derived from the learnable weight
                         parameter.
        scale (Tensor): scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose3d` for other attributes.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
        >>> torch.backends.quantized.engine = 'fbgemm'
        >>> from torch.ao.nn import quantized as nnq
        >>> # With cubic kernels and equal stride
        >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-cubic kernels and unequal stride and with padding
        >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
        >>> input = torch.randn(20, 16, 50, 100, 100)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(q_input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12, 12, 12)
        >>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(q_input)
        >>> h.size()
        torch.Size([1, 16, 6, 6, 6])
        >>> # xdoctest: +SKIP("FIXME: output_size is not a parameter")
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12, 12])
    """

    _FLOAT_MODULE = nn.ConvTranspose3d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True,
                 dilation=1, padding_mode='zeros', device=None, dtype=None):
        factory_kwargs = {'device': device, 'dtype': dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)

        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return 'QuantizedConvTranspose3d'

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
        self._packed_params = torch.ops.quantized.conv_transpose3d_prepack(
            w, b, self.stride, self.padding, self.output_padding, self.dilation,
            self.groups)

    def _weight_bias(self):
        w, b = torch.ops.quantized.conv3d_unpack(self._packed_params)
        return w, b

    def weight(self):
        (w, _) = self._weight_bias()
        return w

    def bias(self):
        (_, b) = self._weight_bias()
        return b

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, T, H, W)`!")
        return ops.quantized.conv_transpose3d(
            input, self._packed_params, self.scale, self.zero_point)

    @classmethod
    def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
        return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
|
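A minimal round-trip sketch (not part of the diff) for the packed-weight accessors above; it assumes a build where the FBGEMM engine is available, and uses only module and op names taken from the file itself:

import torch
from torch.ao.nn import quantized as nnq

torch.backends.quantized.engine = 'fbgemm'  # assumed available on this build
m = nnq.ConvTranspose2d(4, 8, 3)   # weights are packed at construction
w, b = m.weight(), m.bias()        # unpacked on demand via conv2d_unpack
m.set_weight_bias(w, b)            # re-packed via conv_transpose2d_prepack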
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/dropout.py
ADDED
@@ -0,0 +1,28 @@
# mypy: allow-untyped-defs
import torch

__all__ = ['Dropout']

class Dropout(torch.nn.Dropout):
    r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.
    It is a placeholder that lets models which applied dropout to fp32
    tensors keep working with quantized tensors, in both train and eval mode.

    Args:
        p: probability of an element to be zeroed
        inplace: can optionally do the operation in-place. Default: ``False``
    """

    def forward(self, input):
        return input

    def _get_name(self):
        return 'QuantizedDropout'

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return cls(mod.p, mod.inplace)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(mod.p, mod.inplace)
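A short sanity check (not from the diff) of the passthrough behaviour above: since ``forward`` returns its input unchanged, a quantized tensor flows through the module untouched in both train and eval mode.

import torch
from torch.ao.nn.quantized import Dropout

d = Dropout(p=0.5)
x = torch.quantize_per_tensor(torch.randn(2, 3), scale=1.0, zero_point=0, dtype=torch.quint8)
assert d(x) is x          # no elements are zeroed; the module is an identity
assert d.eval()(x) is x   # same in eval mode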
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/embedding_ops.py
ADDED
@@ -0,0 +1,295 @@
# mypy: allow-untyped-defs
import torch
import torch.nn as nn
from torch import Tensor  # noqa: F401
from torch._jit_internal import Optional, List  # noqa: F401

from .utils import _hide_packed_params_repr
from .utils import _quantize_weight

__all__ = ['EmbeddingPackedParams', 'Embedding', 'EmbeddingBag']

class EmbeddingPackedParams(torch.nn.Module):
    _version = 1

    def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
        super().__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
                                                           zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)
        else:
            raise NotImplementedError(f'Unsupported dtype on quantized embedding! Supports quint8 and quint4x2. Got dtype: {dtype}')

    @torch.jit.export
    def set_weight(self, weight: torch.Tensor) -> None:
        if self.dtype in [torch.quint8, torch.quint4x2]:
            self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2.')

    @torch.jit.export
    def _weight(self):
        if self.dtype in [torch.quint8, torch.quint4x2]:
            return torch.ops.quantized.embedding_bag_unpack(self._packed_weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding unpack! Supports quint8 and quint4x2.')

    def forward(self, x):
        return x

    # Version 1
    #   self
    #   |--- _packed_weight : Tensor representing weight of EmbeddingPackedParamsBase
    #   |--- dtype : torch.dtype

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'dtype'] = self.dtype
        destination[prefix + '_packed_weight'] = self._weight()

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        self.dtype = state_dict[prefix + 'dtype']
        state_dict.pop(prefix + 'dtype')

        weight = state_dict[prefix + '_packed_weight']
        state_dict.pop(prefix + '_packed_weight')
        self.set_weight(weight)

        super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                      missing_keys, unexpected_keys, error_msgs)

    def __repr__(self):
        return self._weight().__repr__()

class Embedding(torch.nn.Module):
    r"""
    A quantized Embedding module with quantized packed weights as inputs.
    We adopt the same interface as `torch.nn.Embedding`; please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Embedding for documentation.

    Similar to :class:`~torch.nn.Embedding`, attributes will be randomly
    initialized at module creation time and will be overwritten later.

    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module of
                         shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.

    Examples::
        >>> m = nn.quantized.Embedding(num_embeddings=10, embedding_dim=12)
        >>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
        >>> output = m(indices)
        >>> print(output.size())
        torch.Size([9, 12])

    """
    _version = 1

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None, dtype=torch.quint8) -> None:
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.dtype = dtype

        if _weight is None:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            qweight = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim],
                                                                scales=scales, zero_points=zero_points,
                                                                axis=0, dtype=torch.quint8)
        else:
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            qweight = _weight

        self._packed_params = EmbeddingPackedParams(num_embeddings, embedding_dim, dtype)
        self._packed_params.set_weight(qweight)

    def forward(self, indices: Tensor) -> Tensor:
        if self.dtype == torch.quint4x2:
            return torch.ops.quantized.embedding_4bit(self._packed_params._packed_weight, indices)
        else:
            return torch.ops.quantized.embedding_byte(self._packed_params._packed_weight, indices)

    def _get_name(self):
        return 'QuantizedEmbedding'

    def __repr__(self):
        return _hide_packed_params_repr(self, EmbeddingPackedParams)

    def extra_repr(self):
        extra_repr_str = (f'num_embeddings={self.num_embeddings}, embedding_dim={self.embedding_dim}, '
                          f'dtype={self._packed_params.dtype}, qscheme={self.weight().qscheme()}')

        return extra_repr_str

    def set_weight(self, w: torch.Tensor) -> None:
        self._packed_params.set_weight(w)

    def weight(self):
        return self._packed_params._weight()

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a quantized embedding module from a float module

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by user
        """
        if hasattr(mod, 'weight_fake_quant'):
            assert type(mod) == torch.ao.nn.qat.Embedding, 'nnq.' + cls.__name__ + '.from_float ' + \
                'with fake quant only works for ' + torch.ao.nn.qat.Embedding.__name__
            weight_observer = mod.weight_fake_quant
            activation_post_process = mod.activation_post_process
        else:
            assert type(mod) == nn.Embedding, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
                nn.Embedding.__name__
            assert hasattr(mod, 'qconfig'), 'Embedding input float module must have qconfig defined'
            from torch.ao.quantization import float_qparams_weight_only_qconfig
            if mod.qconfig is not None and mod.qconfig.weight is not None:  # type: ignore[union-attr]
                weight_observer = mod.qconfig.weight()  # type: ignore[union-attr, operator]
            else:
                weight_observer = float_qparams_weight_only_qconfig.weight()

        dtype = weight_observer.dtype
        is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
        assert is_float_qparams_qconfig, \
            'Embedding quantization is only supported with float_qparams_weight_only_qconfig.'

        assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtype for nnq.Embedding is torch.quint8 and torch.quint4x2, got {dtype}'

        # Run the observer to calculate qparams.
        weight_observer(mod.weight)
        qweight = _quantize_weight(mod.weight.float(), weight_observer)

        # Create quantized Embedding module and pass in the quantized weight
        qembedding = Embedding(mod.num_embeddings, mod.embedding_dim)
        qembedding.set_weight(qweight)
        return qembedding

    @classmethod
    def from_reference(cls, ref_embedding):
        qembedding = cls(
            ref_embedding.num_embeddings,
            ref_embedding.embedding_dim,
            ref_embedding.padding_idx,
            ref_embedding.max_norm,
            ref_embedding.norm_type,
            ref_embedding.scale_grad_by_freq,
            ref_embedding.sparse,
            ref_embedding.get_quantized_weight(),
            ref_embedding.weight_dtype,
        )
        return qembedding

class EmbeddingBag(Embedding):
    r"""
    A quantized EmbeddingBag module with quantized packed weights as inputs.
    We adopt the same interface as `torch.nn.EmbeddingBag`; please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.EmbeddingBag for documentation.

    Similar to :class:`~torch.nn.EmbeddingBag`, attributes will be randomly
    initialized at module creation time and will be overwritten later.

    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module of
                         shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.

    Examples::
        >>> m = nn.quantized.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, mode='sum')
        >>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
        >>> offsets = torch.tensor([0, 19, 20, 28, 28, 32])
        >>> output = m(indices, offsets)
        >>> print(output.size())
        torch.Size([5, 12])

    """
    _version = 1

    def __init__(self, num_embeddings: int, embedding_dim: int,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 mode: str = 'sum', sparse: bool = False, _weight: Optional[Tensor] = None,
                 include_last_offset: bool = False, dtype=torch.quint8) -> None:
        super().__init__(num_embeddings, embedding_dim, _weight=_weight, dtype=dtype)

        self.mode = mode
        self.pruned_weights = False
        self.include_last_offset = include_last_offset
        self.dtype = dtype

    def forward(self, indices: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None,
                compressed_indices_mapping: Optional[Tensor] = None) -> Tensor:
        if self.dtype == torch.quint4x2:
            return torch.ops.quantized.embedding_bag_4bit(self._packed_params._packed_weight, indices, offsets, False, 0,
                                                          self.pruned_weights, per_sample_weights, compressed_indices_mapping,
                                                          self.include_last_offset)
        else:
            return torch.ops.quantized.embedding_bag_byte(self._packed_params._packed_weight, indices, offsets, False, 0,
                                                          self.pruned_weights, per_sample_weights, compressed_indices_mapping,
                                                          self.include_last_offset)

    def _get_name(self):
        return 'QuantizedEmbeddingBag'

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a quantized embedding_bag module from a float module

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by user
        """
        if hasattr(mod, 'weight_fake_quant'):
            weight_observer = mod.weight_fake_quant
        else:
            assert type(mod) == nn.EmbeddingBag, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
                nn.EmbeddingBag.__name__
            assert hasattr(mod, 'qconfig'), 'EmbeddingBag input float module must have qconfig defined'
            from torch.ao.quantization.qconfig import float_qparams_weight_only_qconfig
            if mod.qconfig is not None and mod.qconfig.weight is not None:  # type: ignore[union-attr]
                weight_observer = mod.qconfig.weight()  # type: ignore[union-attr, operator]
            else:
                weight_observer = float_qparams_weight_only_qconfig.weight()

        dtype = weight_observer.dtype
        is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
        assert is_float_qparams_qconfig, \
            'EmbeddingBag quantization is only supported with float_qparams_weight_only_qconfig.'

        assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtype for nnq.EmbeddingBag is torch.quint8 and torch.quint4x2, got {dtype}'

        # Run the observer to calculate qparams.
        weight_observer(mod.weight)
        qweight = _quantize_weight(mod.weight.float(), weight_observer)

        # Create quantized EmbeddingBag module and pass in the quantized weight
        qembedding_bag = EmbeddingBag(mod.num_embeddings, mod.embedding_dim, dtype=dtype)
        qembedding_bag.set_weight(qweight)
        return qembedding_bag

    @classmethod
    def from_reference(cls, ref_embedding_bag):
        qembedding_bag = cls(
            ref_embedding_bag.num_embeddings,
            ref_embedding_bag.embedding_dim,
            ref_embedding_bag.max_norm,
            ref_embedding_bag.norm_type,
            ref_embedding_bag.scale_grad_by_freq,
            ref_embedding_bag.mode,
            ref_embedding_bag.sparse,
            ref_embedding_bag.get_quantized_weight(),
            ref_embedding_bag.include_last_offset,
            ref_embedding_bag.weight_dtype,
        )
        return qembedding_bag
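A hedged sketch (not in the diff) of the ``from_float`` path above: attach the weight-only qconfig the assertions require, run the conversion, and look up some indices. ``float_qparams_weight_only_qconfig`` is imported exactly as the file itself does.

import torch
import torch.nn as nn
from torch.ao.nn.quantized import Embedding
from torch.ao.quantization import float_qparams_weight_only_qconfig

float_emb = nn.Embedding(10, 12)
float_emb.qconfig = float_qparams_weight_only_qconfig  # required by from_float
q_emb = Embedding.from_float(float_emb)    # observes, quantizes, packs the weight
out = q_emb(torch.tensor([1, 2, 3]))       # dequantized rows, shape (3, 12)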
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/functional_modules.py
ADDED
@@ -0,0 +1,250 @@
# mypy: allow-untyped-defs
from typing import List

import torch
from torch import Tensor
from torch._ops import ops

__all__ = ['FloatFunctional', 'FXFloatFunctional', 'QFunctional']

class FloatFunctional(torch.nn.Module):
    r"""State collector class for float operations.

    The instance of this class can be used instead of the ``torch.`` prefix for
    some operations. See example usage below.

    .. note::

        This class does not provide a ``forward`` hook. Instead, you must use
        one of the underlying functions (e.g. ``add``).

    Examples::

        >>> f_add = FloatFunctional()
        >>> a = torch.tensor(3.0)
        >>> b = torch.tensor(4.0)
        >>> f_add.add(a, b)  # Equivalent to ``torch.add(a, b)``

    Valid operation names:
        - add
        - cat
        - mul
        - add_relu
        - add_scalar
        - mul_scalar
    """
    def __init__(self):
        super().__init__()
        self.activation_post_process = torch.nn.Identity()

    def forward(self, x):
        raise RuntimeError("FloatFunctional is not intended to use the " +
                           "'forward'. Please use the underlying operation")

    r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
    def add(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.add(x, y)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.add(Tensor, float)``"""
    def add_scalar(self, x: Tensor, y: float) -> Tensor:
        r = torch.add(x, y)
        # Note: this operation is not observed because the observation is not
        # needed for the quantized op.
        return r

    r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
    def mul(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.mul(x, y)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
    def mul_scalar(self, x: Tensor, y: float) -> Tensor:
        r = torch.mul(x, y)
        # Note: this operation is not observed because the observation is not
        # needed for the quantized op.
        return r

    r"""Operation equivalent to ``torch.cat``"""
    def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
        r = torch.cat(x, dim=dim)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``relu(torch.add(x,y))``"""
    def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.add(x, y)
        r = torch.nn.functional.relu(r)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.matmul(Tensor, Tensor)``"""
    def matmul(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.matmul(x, y)
        r = self.activation_post_process(r)
        return r

class FXFloatFunctional(torch.nn.Module):
    r""" module to replace FloatFunctional module before FX graph mode quantization,
    since activation_post_process will be inserted in top level module directly

    Valid operation names:
        - add
        - cat
        - mul
        - add_relu
        - add_scalar
        - mul_scalar
    """
    def forward(self, x):
        raise RuntimeError("FloatFunctional is not intended to use the " +
                           "'forward'. Please use the underlying operation")

    r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
    def add(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.add(x, y)
        return r

    r"""Operation equivalent to ``torch.add(Tensor, float)``"""
    def add_scalar(self, x: Tensor, y: float) -> Tensor:
        r = torch.add(x, y)
        return r

    r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
    def mul(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.mul(x, y)
        return r

    r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
    def mul_scalar(self, x: Tensor, y: float) -> Tensor:
        r = torch.mul(x, y)
        return r

    r"""Operation equivalent to ``torch.cat``"""
    def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
        r = torch.cat(x, dim=dim)
        return r

    r"""Operation equivalent to ``relu(torch.add(x,y))``"""
    def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.add(x, y)
        r = torch.nn.functional.relu(r)
        return r

    r"""Operation equivalent to ``torch.matmul(Tensor, Tensor)``"""
    def matmul(self, x: Tensor, y: Tensor) -> Tensor:
        r = torch.matmul(x, y)
        return r

class QFunctional(torch.nn.Module):
    r"""Wrapper class for quantized operations.

    The instance of this class can be used instead of the
    ``torch.ops.quantized`` prefix. See example usage below.

    .. note::

        This class does not provide a ``forward`` hook. Instead, you must use
        one of the underlying functions (e.g. ``add``).

    Examples::

        >>> q_add = QFunctional()
        >>> # xdoctest: +SKIP
        >>> a = torch.quantize_per_tensor(torch.tensor(3.0), 1.0, 0, torch.qint32)
        >>> b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
        >>> q_add.add(a, b)  # Equivalent to ``torch.ops.quantized.add(a, b, 1.0, 0)``

    Valid operation names:
        - add
        - cat
        - mul
        - add_relu
        - add_scalar
        - mul_scalar
    """
    def __init__(self):
        super().__init__()
        self.scale = 1.0
        self.zero_point = 0
        self.activation_post_process = torch.nn.Identity()

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'scale'] = torch.tensor(self.scale)
        destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        self.scale = float(state_dict.pop(prefix + 'scale'))
        self.zero_point = int(state_dict.pop(prefix + 'zero_point'))
        super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                      missing_keys, unexpected_keys, error_msgs)

    def _get_name(self):
        return 'QFunctional'

    def extra_repr(self):
        return f'scale={self.scale}, zero_point={self.zero_point}'

    def forward(self, x):
        raise RuntimeError("Functional is not intended to use the " +
                           "'forward'. Please use the underlying operation")

    r"""Operation equivalent to ``torch.ops.quantized.add``"""
    def add(self, x: Tensor, y: Tensor) -> Tensor:
        r = ops.quantized.add(x, y, scale=self.scale, zero_point=self.zero_point)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.ops.quantized.add(Tensor, float)``"""
    def add_scalar(self, x: Tensor, y: float) -> Tensor:
        r = ops.quantized.add_scalar(x, y)
        # Note: this operation is not observed because the observation is not
        # needed for the quantized op.
        return r

    r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, Tensor)``"""
    def mul(self, x: Tensor, y: Tensor) -> Tensor:
        r = ops.quantized.mul(x, y, scale=self.scale, zero_point=self.zero_point)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, float)``"""
    def mul_scalar(self, x: Tensor, y: float) -> Tensor:
        r = ops.quantized.mul_scalar(x, y)
        # Note: this operation is not observed because the observation is not
        # needed for the quantized op.
        return r

    r"""Operation equivalent to ``torch.ops.quantized.cat``"""
    def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
        r = ops.quantized.cat(x, scale=self.scale, zero_point=self.zero_point, dim=dim)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.ops.quantized.add_relu``"""
    def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
        r = ops.quantized.add_relu(x, y, scale=self.scale, zero_point=self.zero_point)
        r = self.activation_post_process(r)
        return r

    r"""Operation equivalent to ``torch.ops.quantized.matmul(Tensor, Tensor)``"""
    def matmul(self, x: Tensor, y: Tensor) -> Tensor:
        r = ops.quantized.matmul(x, y, scale=self.scale, zero_point=self.zero_point)
        # Note: this operation is not observed because the observation is not
        # needed for the quantized op.
        return r

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        assert type(mod) == FloatFunctional, \
            "QFunctional.from_float expects an instance of FloatFunctional"
        scale, zero_point = mod.activation_post_process.calculate_qparams()  # type: ignore[operator]
        new_mod = QFunctional()
        new_mod.scale = float(scale)
        new_mod.zero_point = int(zero_point)
        return new_mod
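A minimal sketch (not in the diff) of the eager-mode hand-off between the two classes above: ``FloatFunctional`` collects statistics through ``activation_post_process``, and ``QFunctional.from_float`` freezes them into a scale and zero point. Attaching a ``MinMaxObserver`` by hand stands in for what ``torch.ao.quantization.prepare`` would normally do.

import torch
from torch.ao.nn.quantized import FloatFunctional, QFunctional
from torch.ao.quantization import MinMaxObserver

ff = FloatFunctional()
ff.activation_post_process = MinMaxObserver()  # stand-in for prepare()
_ = ff.add(torch.randn(4), torch.randn(4))     # observer records the output range
qf = QFunctional.from_float(ff)                # scale/zero_point from the observer

xq = torch.quantize_per_tensor(torch.randn(4), 0.1, 0, torch.quint8)
yq = torch.quantize_per_tensor(torch.randn(4), 0.1, 0, torch.quint8)
out = qf.add(xq, yq)  # runs ops.quantized.add with the frozen qparams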