Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- llava_next/share/terminfo/a/aaa-30-s +0 -0
- llava_next/share/terminfo/a/aaa-rv +0 -0
- llava_next/share/terminfo/a/aixterm +0 -0
- llava_next/share/terminfo/a/altos3 +0 -0
- llava_next/share/terminfo/a/ampex175-b +0 -0
- llava_next/share/terminfo/a/ampex232 +0 -0
- llava_next/share/terminfo/a/ampex80 +0 -0
- llava_next/share/terminfo/a/ansi+sgrbold +0 -0
- llava_next/share/terminfo/a/ansi-generic +0 -0
- llava_next/share/terminfo/a/ansi-nt +0 -0
- llava_next/share/terminfo/a/ansi.sysk +0 -0
- llava_next/share/terminfo/a/ansi80x50 +0 -0
- llava_next/share/terminfo/a/apple-ae +0 -0
- llava_next/share/terminfo/a/att4415-w-rv +0 -0
- llava_next/share/terminfo/a/att5420 +0 -0
- llava_next/share/terminfo/a/att605-w +0 -0
- llava_next/share/terminfo/a/att610+cvis +0 -0
- llava_next/share/terminfo/a/att620 +0 -0
- llava_next/share/terminfo/a/avatar0+ +0 -0
- parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so +3 -0
- parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__init__.py +190 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantization_mappings.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_fx.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_pt2e.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/stubs.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/_correct_bias.py +145 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/_learnable_fake_quantize.py +164 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/fake_quantize.py +536 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.py +260 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py +1033 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py +1177 -0
- parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py +1143 -0
.gitattributes
CHANGED
|
@@ -832,3 +832,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_proto_compa
|
|
| 832 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_pywrap_python_op_gen.so filter=lfs diff=lfs merge=lfs -text
|
| 833 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_library_pybind.so filter=lfs diff=lfs merge=lfs -text
|
| 834 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 832 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_pywrap_python_op_gen.so filter=lfs diff=lfs merge=lfs -text
|
| 833 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_library_pybind.so filter=lfs diff=lfs merge=lfs -text
|
| 834 |
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 835 |
+
parrot/lib/python3.10/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text
|
| 836 |
+
parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 837 |
+
parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 838 |
+
parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/share/terminfo/a/aaa-30-s
ADDED
|
Binary file (1.36 kB). View file
|
|
|
llava_next/share/terminfo/a/aaa-rv
ADDED
|
Binary file (1.33 kB). View file
|
|
|
llava_next/share/terminfo/a/aixterm
ADDED
|
Binary file (1.86 kB). View file
|
|
|
llava_next/share/terminfo/a/altos3
ADDED
|
Binary file (1.14 kB). View file
|
|
|
llava_next/share/terminfo/a/ampex175-b
ADDED
|
Binary file (444 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ampex232
ADDED
|
Binary file (502 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ampex80
ADDED
|
Binary file (481 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi+sgrbold
ADDED
|
Binary file (463 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi-generic
ADDED
|
Binary file (756 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi-nt
ADDED
|
Binary file (476 Bytes). View file
|
|
|
llava_next/share/terminfo/a/ansi.sysk
ADDED
|
Binary file (1.83 kB). View file
|
|
|
llava_next/share/terminfo/a/ansi80x50
ADDED
|
Binary file (1.5 kB). View file
|
|
|
llava_next/share/terminfo/a/apple-ae
ADDED
|
Binary file (421 Bytes). View file
|
|
|
llava_next/share/terminfo/a/att4415-w-rv
ADDED
|
Binary file (1.4 kB). View file
|
|
|
llava_next/share/terminfo/a/att5420
ADDED
|
Binary file (1.38 kB). View file
|
|
|
llava_next/share/terminfo/a/att605-w
ADDED
|
Binary file (1.38 kB). View file
|
|
|
llava_next/share/terminfo/a/att610+cvis
ADDED
|
Binary file (134 Bytes). View file
|
|
|
llava_next/share/terminfo/a/att620
ADDED
|
Binary file (1.62 kB). View file
|
|
|
llava_next/share/terminfo/a/avatar0+
ADDED
|
Binary file (660 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:365bdc10ffac948c351faa9b05e8f157310ec4a1b7dc19edb5401d4a83f5c00e
|
| 3 |
+
size 766040
|
parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f03d6cd6f3f2b3ded71c109ee11b184f6a94d3414410f392ed06075119f2ad03
|
| 3 |
+
size 224766
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc
ADDED
|
Binary file (692 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc
ADDED
|
Binary file (5.34 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc
ADDED
|
Binary file (4.07 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py
ADDED
|
File without changes
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc
ADDED
|
Binary file (6.23 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc
ADDED
|
Binary file (7.35 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__init__.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
# flake8: noqa: F403
|
| 3 |
+
|
| 4 |
+
from .fake_quantize import * # noqa: F403
|
| 5 |
+
from .fuse_modules import fuse_modules # noqa: F403
|
| 6 |
+
from .fuse_modules import fuse_modules_qat # noqa: F403
|
| 7 |
+
from .fuser_method_mappings import * # noqa: F403
|
| 8 |
+
from .observer import * # noqa: F403
|
| 9 |
+
from .qconfig import * # noqa: F403
|
| 10 |
+
from .qconfig_mapping import * # noqa: F403
|
| 11 |
+
from .quant_type import * # noqa: F403
|
| 12 |
+
from .quantization_mappings import * # type: ignore[no-redef]
|
| 13 |
+
from .quantize import * # noqa: F403
|
| 14 |
+
from .quantize_jit import * # noqa: F403
|
| 15 |
+
from .stubs import * # noqa: F403
|
| 16 |
+
from .pt2e.export_utils import _move_exported_model_to_eval as move_exported_model_to_eval
|
| 17 |
+
from .pt2e.export_utils import _move_exported_model_to_train as move_exported_model_to_train
|
| 18 |
+
from .pt2e.export_utils import _allow_exported_model_train_eval as allow_exported_model_train_eval
|
| 19 |
+
from .pt2e.generate_numeric_debug_handle import generate_numeric_debug_handle # noqa: F401
|
| 20 |
+
from typing import Union, List, Callable, Tuple, Optional
|
| 21 |
+
from torch import Tensor
|
| 22 |
+
import torch
|
| 23 |
+
|
| 24 |
+
ObserverOrFakeQuantize = Union[ObserverBase, FakeQuantizeBase]
|
| 25 |
+
ObserverOrFakeQuantize.__module__ = "torch.ao.quantization"
|
| 26 |
+
|
| 27 |
+
__all__ = [
|
| 28 |
+
"DeQuantStub",
|
| 29 |
+
"FakeQuantize",
|
| 30 |
+
"FakeQuantizeBase",
|
| 31 |
+
"FixedQParamsFakeQuantize",
|
| 32 |
+
"FixedQParamsObserver",
|
| 33 |
+
"FusedMovingAvgObsFakeQuantize",
|
| 34 |
+
"HistogramObserver",
|
| 35 |
+
"MatchAllNode",
|
| 36 |
+
"MinMaxObserver",
|
| 37 |
+
"MovingAverageMinMaxObserver",
|
| 38 |
+
"MovingAveragePerChannelMinMaxObserver",
|
| 39 |
+
"NoopObserver",
|
| 40 |
+
"ObserverBase",
|
| 41 |
+
"ObserverOrFakeQuantize",
|
| 42 |
+
"Pattern",
|
| 43 |
+
"PerChannelMinMaxObserver",
|
| 44 |
+
"PlaceholderObserver",
|
| 45 |
+
"QConfig",
|
| 46 |
+
"QConfigAny",
|
| 47 |
+
"QConfigDynamic",
|
| 48 |
+
"QConfigMapping",
|
| 49 |
+
"QuantStub",
|
| 50 |
+
"QuantType",
|
| 51 |
+
"QuantWrapper",
|
| 52 |
+
"RecordingObserver",
|
| 53 |
+
"ReuseInputObserver",
|
| 54 |
+
"UniformQuantizationObserverBase",
|
| 55 |
+
"add_quant_dequant",
|
| 56 |
+
"convert",
|
| 57 |
+
"convert_dynamic_jit",
|
| 58 |
+
"convert_jit",
|
| 59 |
+
"default_affine_fixed_qparams_fake_quant",
|
| 60 |
+
"default_affine_fixed_qparams_observer",
|
| 61 |
+
"default_debug_observer",
|
| 62 |
+
"default_dynamic_fake_quant",
|
| 63 |
+
"default_dynamic_quant_observer",
|
| 64 |
+
"default_embedding_fake_quant",
|
| 65 |
+
"default_embedding_fake_quant_4bit",
|
| 66 |
+
"default_eval_fn",
|
| 67 |
+
"default_fake_quant",
|
| 68 |
+
"default_fixed_qparams_range_0to1_fake_quant",
|
| 69 |
+
"default_fixed_qparams_range_0to1_observer",
|
| 70 |
+
"default_fixed_qparams_range_neg1to1_fake_quant",
|
| 71 |
+
"default_fixed_qparams_range_neg1to1_observer",
|
| 72 |
+
"default_float_qparams_observer",
|
| 73 |
+
"default_float_qparams_observer_4bit",
|
| 74 |
+
"default_fused_act_fake_quant",
|
| 75 |
+
"default_fused_per_channel_wt_fake_quant",
|
| 76 |
+
"default_fused_wt_fake_quant",
|
| 77 |
+
"default_histogram_fake_quant",
|
| 78 |
+
"default_histogram_observer",
|
| 79 |
+
"default_observer",
|
| 80 |
+
"default_per_channel_weight_fake_quant",
|
| 81 |
+
"default_per_channel_weight_observer",
|
| 82 |
+
"default_placeholder_observer",
|
| 83 |
+
"default_reuse_input_observer",
|
| 84 |
+
"default_symmetric_fixed_qparams_fake_quant",
|
| 85 |
+
"default_symmetric_fixed_qparams_observer",
|
| 86 |
+
"default_weight_fake_quant",
|
| 87 |
+
"default_weight_observer",
|
| 88 |
+
"disable_fake_quant",
|
| 89 |
+
"disable_observer",
|
| 90 |
+
"enable_fake_quant",
|
| 91 |
+
"enable_observer",
|
| 92 |
+
"fuse_conv_bn",
|
| 93 |
+
"fuse_conv_bn_jit",
|
| 94 |
+
"fuse_conv_bn_relu",
|
| 95 |
+
"fuse_convtranspose_bn",
|
| 96 |
+
"fuse_linear_bn",
|
| 97 |
+
"fuse_modules",
|
| 98 |
+
"fuse_modules_qat",
|
| 99 |
+
"fused_per_channel_wt_fake_quant_range_neg_127_to_127",
|
| 100 |
+
"fused_wt_fake_quant_range_neg_127_to_127",
|
| 101 |
+
"get_combined_dict",
|
| 102 |
+
"get_default_compare_output_module_list",
|
| 103 |
+
"get_default_custom_config_dict",
|
| 104 |
+
"get_default_dynamic_quant_module_mappings",
|
| 105 |
+
"get_default_dynamic_sparse_quant_module_mappings",
|
| 106 |
+
"get_default_float_to_quantized_operator_mappings",
|
| 107 |
+
"get_default_qat_module_mappings",
|
| 108 |
+
"get_default_qat_qconfig",
|
| 109 |
+
"get_default_qat_qconfig_dict",
|
| 110 |
+
"get_default_qat_qconfig_mapping",
|
| 111 |
+
"get_default_qconfig",
|
| 112 |
+
"get_default_qconfig_dict",
|
| 113 |
+
"get_default_qconfig_mapping",
|
| 114 |
+
"get_default_qconfig_propagation_list",
|
| 115 |
+
"get_default_static_quant_module_mappings",
|
| 116 |
+
"get_default_static_quant_reference_module_mappings",
|
| 117 |
+
"get_default_static_sparse_quant_module_mappings",
|
| 118 |
+
"get_dynamic_quant_module_class",
|
| 119 |
+
"get_embedding_qat_module_mappings",
|
| 120 |
+
"get_embedding_static_quant_module_mappings",
|
| 121 |
+
"get_fuser_method",
|
| 122 |
+
"get_fuser_method_new",
|
| 123 |
+
"get_observer_state_dict",
|
| 124 |
+
"get_quantized_operator",
|
| 125 |
+
"get_static_quant_module_class",
|
| 126 |
+
"load_observer_state_dict",
|
| 127 |
+
"move_exported_model_to_eval",
|
| 128 |
+
"move_exported_model_to_train",
|
| 129 |
+
"allow_exported_model_train_eval",
|
| 130 |
+
"no_observer_set",
|
| 131 |
+
"per_channel_weight_observer_range_neg_127_to_127",
|
| 132 |
+
"prepare",
|
| 133 |
+
"prepare_dynamic_jit",
|
| 134 |
+
"prepare_jit",
|
| 135 |
+
"prepare_qat",
|
| 136 |
+
"propagate_qconfig_",
|
| 137 |
+
"qconfig_equals",
|
| 138 |
+
"quantize",
|
| 139 |
+
"quantize_dynamic",
|
| 140 |
+
"quantize_dynamic_jit",
|
| 141 |
+
"quantize_jit",
|
| 142 |
+
"quantize_qat",
|
| 143 |
+
"script_qconfig",
|
| 144 |
+
"script_qconfig_dict",
|
| 145 |
+
"swap_module",
|
| 146 |
+
"weight_observer_range_neg_127_to_127",
|
| 147 |
+
"generate_numeric_debug_handle",
|
| 148 |
+
]
|
| 149 |
+
|
| 150 |
+
def default_eval_fn(model, calib_data):
|
| 151 |
+
r"""Define the default evaluation function.
|
| 152 |
+
|
| 153 |
+
Default evaluation function takes a torch.utils.data.Dataset or a list of
|
| 154 |
+
input Tensors and run the model on the dataset
|
| 155 |
+
"""
|
| 156 |
+
for data, target in calib_data:
|
| 157 |
+
model(data)
|
| 158 |
+
|
| 159 |
+
class _DerivedObserverOrFakeQuantize(ObserverBase):
|
| 160 |
+
r"""This observer is used to describe an observer whose quantization parameters
|
| 161 |
+
are derived from other observers
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
def __init__(
|
| 165 |
+
self,
|
| 166 |
+
dtype: torch.dtype,
|
| 167 |
+
obs_or_fqs: List[ObserverOrFakeQuantize],
|
| 168 |
+
derive_qparams_fn: Callable[[List[ObserverOrFakeQuantize]], Tuple[Tensor, Tensor]],
|
| 169 |
+
quant_min: Optional[int]=None,
|
| 170 |
+
quant_max: Optional[int]=None,
|
| 171 |
+
qscheme: Optional[torch.qscheme]=None,
|
| 172 |
+
ch_axis: Optional[int] = None
|
| 173 |
+
):
|
| 174 |
+
super().__init__(dtype)
|
| 175 |
+
self.obs_or_fqs = obs_or_fqs
|
| 176 |
+
self.derive_qparams_fn = derive_qparams_fn
|
| 177 |
+
self.quant_min = quant_min
|
| 178 |
+
self.quant_max = quant_max
|
| 179 |
+
self.qscheme = qscheme
|
| 180 |
+
self.ch_axis = ch_axis
|
| 181 |
+
|
| 182 |
+
from .utils import is_per_channel
|
| 183 |
+
if is_per_channel(self.qscheme):
|
| 184 |
+
assert self.ch_axis is not None, "Must provide a valid ch_axis if qscheme is per channel"
|
| 185 |
+
|
| 186 |
+
def forward(self, x: Tensor) -> Tensor:
|
| 187 |
+
return x
|
| 188 |
+
|
| 189 |
+
def calculate_qparams(self):
|
| 190 |
+
return self.derive_qparams_fn(self.obs_or_fqs)
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc
ADDED
|
Binary file (4.83 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc
ADDED
|
Binary file (5.79 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc
ADDED
|
Binary file (5.61 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc
ADDED
|
Binary file (48.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc
ADDED
|
Binary file (976 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantization_mappings.cpython-310.pyc
ADDED
|
Binary file (9.52 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize.cpython-310.pyc
ADDED
|
Binary file (21.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_fx.cpython-310.pyc
ADDED
|
Binary file (28.1 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_pt2e.cpython-310.pyc
ADDED
|
Binary file (8.51 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/stubs.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/_correct_bias.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.ao.nn.quantized as nnq
|
| 5 |
+
|
| 6 |
+
import torch.ao.quantization
|
| 7 |
+
import torch.ao.ns._numeric_suite as ns
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
"get_module",
|
| 11 |
+
"parent_child_names",
|
| 12 |
+
"get_param",
|
| 13 |
+
"MeanShadowLogger",
|
| 14 |
+
"bias_correction",
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
_supported_modules = {nn.Linear, nn.Conv2d}
|
| 18 |
+
_supported_modules_quantized = {nnq.Linear, nnq.Conv2d}
|
| 19 |
+
|
| 20 |
+
def get_module(model, name):
|
| 21 |
+
"""Given name of submodule, this function grabs the submodule from given model."""
|
| 22 |
+
return dict(model.named_modules())[name]
|
| 23 |
+
|
| 24 |
+
def parent_child_names(name):
|
| 25 |
+
"""Split full name of submodule into parent submodule's full name and submodule's name."""
|
| 26 |
+
split_name = name.rsplit('.', 1)
|
| 27 |
+
if len(split_name) == 1:
|
| 28 |
+
return '', split_name[0]
|
| 29 |
+
else:
|
| 30 |
+
return split_name[0], split_name[1]
|
| 31 |
+
|
| 32 |
+
def get_param(module, attr):
|
| 33 |
+
"""Get the parameter given a module and attribute.
|
| 34 |
+
|
| 35 |
+
Sometimes the weights/bias attribute gives you the raw tensor, but sometimes
|
| 36 |
+
gives a function that will give you the raw tensor, this function takes care of that logic
|
| 37 |
+
"""
|
| 38 |
+
param = getattr(module, attr, None)
|
| 39 |
+
if callable(param):
|
| 40 |
+
return param()
|
| 41 |
+
else:
|
| 42 |
+
return param
|
| 43 |
+
|
| 44 |
+
class MeanShadowLogger(ns.Logger):
|
| 45 |
+
"""Mean Logger for a Shadow module.
|
| 46 |
+
|
| 47 |
+
A logger for a Shadow module whose purpose is to record the rolling mean
|
| 48 |
+
of the data passed to the floating point and quantized models
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(self):
|
| 52 |
+
"""Set up initial values for float and quantized stats, count, float sum, and quant sum."""
|
| 53 |
+
super().__init__()
|
| 54 |
+
self.stats["float"] = None
|
| 55 |
+
self.stats["quantized"] = None
|
| 56 |
+
self.count = 0
|
| 57 |
+
self.float_sum = None
|
| 58 |
+
self.quant_sum = None
|
| 59 |
+
|
| 60 |
+
def forward(self, x, y):
|
| 61 |
+
"""Compute the average of quantized and floating-point data from modules.
|
| 62 |
+
|
| 63 |
+
The inputs x,y are output data from the quantized and floating-point modules.
|
| 64 |
+
x is for the quantized module, y is for the floating point module
|
| 65 |
+
"""
|
| 66 |
+
if x.is_quantized:
|
| 67 |
+
x = x.dequantize()
|
| 68 |
+
|
| 69 |
+
self.count += 1
|
| 70 |
+
if self.stats["quantized"] is None:
|
| 71 |
+
self.stats["quantized"] = x
|
| 72 |
+
self.quant_sum = x
|
| 73 |
+
else:
|
| 74 |
+
self.quant_sum += x
|
| 75 |
+
self.stats["quantized"] = self.quant_sum / self.count
|
| 76 |
+
|
| 77 |
+
if self.stats["float"] is None:
|
| 78 |
+
self.stats["float"] = y
|
| 79 |
+
self.float_sum = y
|
| 80 |
+
else:
|
| 81 |
+
self.float_sum += y
|
| 82 |
+
self.stats["float"] = self.float_sum / self.count
|
| 83 |
+
|
| 84 |
+
def clear(self):
|
| 85 |
+
self.stats["float"] = None
|
| 86 |
+
self.stats["quantized"] = None
|
| 87 |
+
self.count = 0
|
| 88 |
+
self.float_sum = None
|
| 89 |
+
self.quant_sum = None
|
| 90 |
+
|
| 91 |
+
def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):
|
| 92 |
+
"""Perform bias correction on a module.
|
| 93 |
+
|
| 94 |
+
Using numeric suite shadow module, the expected output of the floating point and quantized modules
|
| 95 |
+
is recorded. Using that data the bias of supported modules is shifted to compensate for the drift caused
|
| 96 |
+
by quantization
|
| 97 |
+
Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
float_model: a trained model that serves as a reference to what bias correction should aim for
|
| 101 |
+
quantized_model: quantized form of float_model that bias correction is to applied to
|
| 102 |
+
img_data: calibration data to estimate the expected output (used to find quantization error)
|
| 103 |
+
target_modules: specifies what submodules in quantized_model need bias correction (can be extended to
|
| 104 |
+
unquantized submodules)
|
| 105 |
+
neval_batches: a cap to the number of batches you want to be used for estimating the expected output
|
| 106 |
+
"""
|
| 107 |
+
ns.prepare_model_with_stubs(float_model, quantized_model, _supported_modules, MeanShadowLogger)
|
| 108 |
+
|
| 109 |
+
uncorrected_modules = {}
|
| 110 |
+
for name, submodule in quantized_model.named_modules():
|
| 111 |
+
if type(submodule) in target_modules:
|
| 112 |
+
uncorrected_modules[name] = submodule
|
| 113 |
+
|
| 114 |
+
for uncorrected_module in uncorrected_modules:
|
| 115 |
+
quantized_submodule = get_module(quantized_model, uncorrected_module)
|
| 116 |
+
bias = get_param(quantized_submodule, 'bias')
|
| 117 |
+
if bias is not None:
|
| 118 |
+
|
| 119 |
+
count = 0
|
| 120 |
+
for data in img_data:
|
| 121 |
+
quantized_model(data[0])
|
| 122 |
+
count += 1
|
| 123 |
+
if count == neval_batches:
|
| 124 |
+
break
|
| 125 |
+
ob_dict = ns.get_logger_dict(quantized_model)
|
| 126 |
+
parent_name, _ = parent_child_names(uncorrected_module)
|
| 127 |
+
|
| 128 |
+
float_data = ob_dict[parent_name + '.stats']['float']
|
| 129 |
+
quant_data = ob_dict[parent_name + '.stats']['quantized']
|
| 130 |
+
|
| 131 |
+
# math for expected_error
|
| 132 |
+
quantization_error = quant_data - float_data
|
| 133 |
+
dims = list(range(quantization_error.dim()))
|
| 134 |
+
# Note: we don't want to take the mean over the output channel dimension
|
| 135 |
+
dims.remove(1)
|
| 136 |
+
expected_error = torch.mean(quantization_error, dims)
|
| 137 |
+
|
| 138 |
+
updated_bias = bias.data - expected_error
|
| 139 |
+
|
| 140 |
+
bias.data = updated_bias
|
| 141 |
+
|
| 142 |
+
# Resets the data contained in the loggers
|
| 143 |
+
for name, submodule in quantized_model.named_modules():
|
| 144 |
+
if isinstance(submodule, MeanShadowLogger):
|
| 145 |
+
submodule.clear()
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/_learnable_fake_quantize.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch.nn.parameter import Parameter
|
| 4 |
+
from typing import List
|
| 5 |
+
|
| 6 |
+
__all__: List[str] = []
|
| 7 |
+
|
| 8 |
+
class _LearnableFakeQuantize(torch.ao.quantization.FakeQuantizeBase):
|
| 9 |
+
r"""Generalized extension of the FakeQuantize module in fake_quantize.py.
|
| 10 |
+
|
| 11 |
+
This is an extension of the FakeQuantize module in fake_quantize.py, which
|
| 12 |
+
supports more generalized lower-bit quantization and supports learning of the scale
|
| 13 |
+
and zero point parameters through backpropagation.
|
| 14 |
+
|
| 15 |
+
In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize
|
| 16 |
+
module also includes the following attributes to support quantization parameter learning.
|
| 17 |
+
|
| 18 |
+
* :attr:`channel_len` defines the length of the channel when initializing scale and zero point
|
| 19 |
+
for the per channel case.
|
| 20 |
+
|
| 21 |
+
* :attr:`use_grad_scaling` defines the flag for whether the gradients for scale and zero point are
|
| 22 |
+
normalized by the constant, which is proportional to the square root of the number of
|
| 23 |
+
elements in the tensor. The related literature justifying the use of this particular constant
|
| 24 |
+
can be found here: https://openreview.net/pdf?id=rkgO66VKDS.
|
| 25 |
+
|
| 26 |
+
* :attr:`fake_quant_enabled` defines the flag for enabling fake quantization on the output.
|
| 27 |
+
|
| 28 |
+
* :attr:`static_enabled` defines the flag for using observer's static estimation for
|
| 29 |
+
scale and zero point.
|
| 30 |
+
|
| 31 |
+
* :attr:`learning_enabled` defines the flag for enabling backpropagation for scale and zero point.
|
| 32 |
+
"""
|
| 33 |
+
def __init__(self, observer, quant_min=0, quant_max=255, scale=1., zero_point=0., channel_len=-1,
|
| 34 |
+
use_grad_scaling=False, **observer_kwargs):
|
| 35 |
+
super().__init__()
|
| 36 |
+
assert quant_min < quant_max, 'quant_min must be strictly less than quant_max.'
|
| 37 |
+
self.quant_min = quant_min
|
| 38 |
+
self.quant_max = quant_max
|
| 39 |
+
# also pass quant_min and quant_max to observer
|
| 40 |
+
observer_kwargs["quant_min"] = quant_min
|
| 41 |
+
observer_kwargs["quant_max"] = quant_max
|
| 42 |
+
self.use_grad_scaling = use_grad_scaling
|
| 43 |
+
if channel_len == -1:
|
| 44 |
+
self.scale = Parameter(torch.tensor([scale]))
|
| 45 |
+
self.zero_point = Parameter(torch.tensor([zero_point]))
|
| 46 |
+
else:
|
| 47 |
+
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
|
| 48 |
+
self.scale = Parameter(torch.tensor([scale] * channel_len))
|
| 49 |
+
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
|
| 50 |
+
|
| 51 |
+
self.activation_post_process = observer(**observer_kwargs)
|
| 52 |
+
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
|
| 53 |
+
'quant_min out of bound'
|
| 54 |
+
assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
|
| 55 |
+
'quant_max out of bound'
|
| 56 |
+
self.dtype = self.activation_post_process.dtype
|
| 57 |
+
self.qscheme = self.activation_post_process.qscheme
|
| 58 |
+
self.ch_axis = self.activation_post_process.ch_axis \
|
| 59 |
+
if hasattr(self.activation_post_process, 'ch_axis') else -1
|
| 60 |
+
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
|
| 61 |
+
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
|
| 62 |
+
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
|
| 63 |
+
|
| 64 |
+
bitrange = torch.tensor(quant_max - quant_min + 1).double()
|
| 65 |
+
self.bitwidth = int(torch.log2(bitrange).item())
|
| 66 |
+
self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))
|
| 67 |
+
|
| 68 |
+
@torch.jit.export
|
| 69 |
+
def enable_param_learning(self):
|
| 70 |
+
r"""Enable parameter learning over static observer estimates.
|
| 71 |
+
|
| 72 |
+
Enables learning of quantization parameters and
|
| 73 |
+
disables static observer estimates. Forward path returns fake quantized X.
|
| 74 |
+
"""
|
| 75 |
+
self.toggle_qparam_learning(enabled=True) \
|
| 76 |
+
.toggle_fake_quant(enabled=True) \
|
| 77 |
+
.toggle_observer_update(enabled=False)
|
| 78 |
+
return self
|
| 79 |
+
|
| 80 |
+
@torch.jit.export
|
| 81 |
+
def enable_static_estimate(self):
|
| 82 |
+
"""Enable static estimates of quantization parameters.
|
| 83 |
+
|
| 84 |
+
Enables static observer estimates and disables learning of
|
| 85 |
+
quantization parameters. Forward path returns fake quantized X.
|
| 86 |
+
"""
|
| 87 |
+
self.toggle_qparam_learning(enabled=False) \
|
| 88 |
+
.toggle_fake_quant(enabled=True) \
|
| 89 |
+
.toggle_observer_update(enabled=True)
|
| 90 |
+
|
| 91 |
+
@torch.jit.export
|
| 92 |
+
def enable_static_observation(self):
|
| 93 |
+
"""Enable accumulation of data without updating quantization parameters.
|
| 94 |
+
|
| 95 |
+
Enables static observer accumulating data from input but doesn't
|
| 96 |
+
update the quantization parameters. Forward path returns the original X.
|
| 97 |
+
"""
|
| 98 |
+
self.toggle_qparam_learning(enabled=False) \
|
| 99 |
+
.toggle_fake_quant(enabled=False) \
|
| 100 |
+
.toggle_observer_update(enabled=True)
|
| 101 |
+
|
| 102 |
+
@torch.jit.export
|
| 103 |
+
def toggle_observer_update(self, enabled=True):
|
| 104 |
+
self.static_enabled[0] = int(enabled) # type: ignore[operator]
|
| 105 |
+
return self
|
| 106 |
+
|
| 107 |
+
@torch.jit.export
|
| 108 |
+
def enable_observer(self, enabled=True):
|
| 109 |
+
self.toggle_observer_update(enabled)
|
| 110 |
+
|
| 111 |
+
@torch.jit.export
|
| 112 |
+
def toggle_qparam_learning(self, enabled=True):
|
| 113 |
+
self.learning_enabled[0] = int(enabled) # type: ignore[operator]
|
| 114 |
+
self.scale.requires_grad = enabled
|
| 115 |
+
self.zero_point.requires_grad = enabled
|
| 116 |
+
return self
|
| 117 |
+
|
| 118 |
+
@torch.jit.export
|
| 119 |
+
def toggle_fake_quant(self, enabled=True):
|
| 120 |
+
self.fake_quant_enabled[0] = int(enabled)
|
| 121 |
+
return self
|
| 122 |
+
|
| 123 |
+
@torch.jit.export
|
| 124 |
+
def observe_quant_params(self):
|
| 125 |
+
print(f'_LearnableFakeQuantize Scale: {self.scale.detach()}')
|
| 126 |
+
print(f'_LearnableFakeQuantize Zero Point: {self.zero_point.detach()}')
|
| 127 |
+
|
| 128 |
+
@torch.jit.export
|
| 129 |
+
def calculate_qparams(self):
|
| 130 |
+
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
|
| 131 |
+
scale = self.scale.detach()
|
| 132 |
+
zero_point = self.zero_point.detach().round().clamp(self.quant_min, self.quant_max).long()
|
| 133 |
+
return scale, zero_point
|
| 134 |
+
|
| 135 |
+
def forward(self, X):
|
| 136 |
+
if self.static_enabled[0] == 1: # type: ignore[index]
|
| 137 |
+
self.activation_post_process(X.detach())
|
| 138 |
+
_scale, _zero_point = self.activation_post_process.calculate_qparams()
|
| 139 |
+
_scale = _scale.to(self.scale.device)
|
| 140 |
+
_zero_point = _zero_point.to(self.zero_point.device)
|
| 141 |
+
self.scale.data.copy_(_scale)
|
| 142 |
+
self.zero_point.data.copy_(_zero_point)
|
| 143 |
+
else:
|
| 144 |
+
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
|
| 145 |
+
|
| 146 |
+
if self.fake_quant_enabled[0] == 1:
|
| 147 |
+
if self.qscheme in (torch.per_channel_symmetric, torch.per_tensor_symmetric):
|
| 148 |
+
self.zero_point.data.zero_()
|
| 149 |
+
|
| 150 |
+
if self.use_grad_scaling:
|
| 151 |
+
grad_factor = 1.0 / (X.numel() * self.quant_max) ** 0.5
|
| 152 |
+
else:
|
| 153 |
+
grad_factor = 1.0
|
| 154 |
+
if self.qscheme in (
|
| 155 |
+
torch.per_channel_symmetric, torch.per_channel_affine):
|
| 156 |
+
X = torch._fake_quantize_learnable_per_channel_affine(
|
| 157 |
+
X, self.scale, self.zero_point, self.ch_axis,
|
| 158 |
+
self.quant_min, self.quant_max, grad_factor)
|
| 159 |
+
else:
|
| 160 |
+
X = torch._fake_quantize_learnable_per_tensor_affine(
|
| 161 |
+
X, self.scale, self.zero_point,
|
| 162 |
+
self.quant_min, self.quant_max, grad_factor)
|
| 163 |
+
|
| 164 |
+
return X
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/fake_quantize.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
"""Implements modules used to perform fake quantization."""
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.nn import Module
|
| 6 |
+
from torch.ao.quantization.observer import (
|
| 7 |
+
MovingAverageMinMaxObserver,
|
| 8 |
+
HistogramObserver,
|
| 9 |
+
MovingAveragePerChannelMinMaxObserver,
|
| 10 |
+
FixedQParamsObserver,
|
| 11 |
+
default_fixed_qparams_range_0to1_observer,
|
| 12 |
+
default_fixed_qparams_range_neg1to1_observer,
|
| 13 |
+
_with_args,
|
| 14 |
+
)
|
| 15 |
+
import re
|
| 16 |
+
from abc import ABC, abstractmethod
|
| 17 |
+
from typing import Any, Tuple
|
| 18 |
+
|
| 19 |
+
# Public API of this module; names not listed here are internal helpers.
__all__ = [
    "FakeQuantizeBase",
    "FakeQuantize",
    "FixedQParamsFakeQuantize",
    "FusedMovingAvgObsFakeQuantize",
    "disable_fake_quant",
    "disable_observer",
    "enable_fake_quant",
    "enable_observer",
    "default_fake_quant",
    "default_weight_fake_quant",
    "default_dynamic_fake_quant",
    "default_fixed_qparams_range_neg1to1_fake_quant",
    "default_fixed_qparams_range_0to1_fake_quant",
    "default_symmetric_fixed_qparams_fake_quant",
    "default_affine_fixed_qparams_fake_quant",
    "default_per_channel_weight_fake_quant",
    "default_embedding_fake_quant",
    "default_embedding_fake_quant_4bit",
    "default_histogram_fake_quant",
    "default_fused_act_fake_quant",
    "default_fused_wt_fake_quant",
    "default_fused_per_channel_wt_fake_quant",
    "fused_wt_fake_quant_range_neg_127_to_127",
    "fused_per_channel_wt_fake_quant_range_neg_127_to_127",
]
|
| 45 |
+
|
| 46 |
+
def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
|
| 47 |
+
return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine, torch.per_channel_affine_float_qparams]
|
| 48 |
+
|
| 49 |
+
def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
|
| 50 |
+
return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]
|
| 51 |
+
|
| 52 |
+
def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
|
| 53 |
+
return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]
|
| 54 |
+
|
| 55 |
+
def _is_float_qparams(qscheme: 'torch.qscheme') -> bool:
|
| 56 |
+
return qscheme in [torch.per_channel_affine_float_qparams, ]
|
| 57 |
+
|
| 58 |
+
class FakeQuantizeBase(ABC, Module):
    r"""Base fake quantize module.

    Base fake quantize module
    Any fake quantize implementation should derive from this class.

    Concrete fake quantize module should follow the same API. In forward, they will update
    the statistics of the observed Tensor and fake quantize the input. They should also provide a
    `calculate_qparams` function that computes the quantization parameters given
    the collected statistics.

    """

    fake_quant_enabled: torch.Tensor
    observer_enabled: torch.Tensor

    def __init__(self):
        """Set fake_quant_enabled and observer_enabled."""
        super().__init__()
        # These flags are buffers (not plain attributes) so DDP replicates
        # them; dtype is uint8 because NCCL cannot broadcast bool tensors.
        self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
        self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))

    @abstractmethod
    def forward(self, x):
        pass

    @abstractmethod
    def calculate_qparams(self, **kwargs):
        pass

    @torch.jit.export
    def enable_fake_quant(self, enabled: bool = True) -> None:
        """Set the fake-quant flag (1 when enabled, 0 otherwise)."""
        self.fake_quant_enabled[0] = int(enabled)

    @torch.jit.export
    def disable_fake_quant(self):
        """Convenience wrapper equivalent to ``enable_fake_quant(False)``."""
        self.enable_fake_quant(enabled=False)

    @torch.jit.export
    def enable_observer(self, enabled: bool = True) -> None:
        """Set the observer flag (1 when enabled, 0 otherwise)."""
        self.observer_enabled[0] = int(enabled)

    @torch.jit.export
    def disable_observer(self):
        """Convenience wrapper equivalent to ``enable_observer(False)``."""
        self.enable_observer(enabled=False)

    @classmethod
    def with_args(cls, **kwargs):
        """Return a constructor partial with ``kwargs`` pre-bound."""
        constructor = _with_args(cls, **kwargs)
        # Point the partial's module at the public location so the
        # public-vs-private API checks are satisfied.
        constructor.__module__ = "torch.ao.quantization.fake_quantize"
        return constructor
|
| 114 |
+
|
| 115 |
+
class FakeQuantize(FakeQuantizeBase):
    r"""Simulate the quantize and dequantize operations in training time.

    The output of this module is given by::

        x_out = (
          clamp(round(x/scale + zero_point), quant_min, quant_max) - zero_point
        ) * scale

    * :attr:`is_dynamic` indicates whether the fake quantize is a placeholder for dynamic quantization
      operators (choose_qparams -> q -> dq) or static quantization operators (q -> dq)

    * :attr:`scale` defines the scale factor used for quantization.

    * :attr:`zero_point` specifies the quantized value to which 0 in floating point maps to

    * :attr:`fake_quant_enabled` controls the application of fake quantization on tensors, note that
      statistics can still be updated.

    * :attr:`observer_enabled` controls statistics collection on tensors

    * :attr:`dtype` specifies the quantized dtype that is being emulated with fake-quantization,
      allowable values are torch.qint8 and torch.quint8.

    Args:

        observer (module): Module for observing statistics on input tensors and calculating scale
                           and zero-point.
        observer_kwargs (optional): Arguments for the observer module

    Attributes:
        activation_post_process (Module): User provided module that collects statistics on the input tensor and
                                          provides a method to calculate scale and zero-point.

    """

    scale: torch.Tensor
    zero_point: torch.Tensor

    def __init__(self, observer=MovingAverageMinMaxObserver, quant_min=None, quant_max=None, is_dynamic=False, **observer_kwargs):
        """Construct the observer and mirror its qparams/metadata onto this module."""
        super().__init__()
        # Populate quant_min/quant_max to observer_kwargs if valid
        if quant_min is not None and quant_max is not None:
            assert quant_min <= quant_max, \
                'quant_min must be less than or equal to quant_max'
            dtype = observer_kwargs.get("dtype", torch.quint8)
            if hasattr(observer, "p"):
                # In case observer is _PartialWrapper, dtype can be stored in
                # observer.p.keywords["dtype"]
                dtype = getattr(getattr(observer, "p", {}), "keywords", {}).get(
                    "dtype", dtype
                )
            # quant_min/quant_max must fit the integer range of the target dtype.
            assert torch.iinfo(dtype).min <= quant_min, 'quant_min out of bound'
            assert quant_max <= torch.iinfo(dtype).max, 'quant_max out of bound'
            observer_kwargs.update({"quant_min": quant_min, "quant_max": quant_max})
        observer_kwargs["is_dynamic"] = is_dynamic
        self.activation_post_process = observer(**observer_kwargs)
        # TODO: keeping self.quant_min/max for BC; remove after a couple releases
        # Users should use self.activation_post_process.quant_min
        self.quant_min = self.activation_post_process.quant_min
        self.quant_max = self.activation_post_process.quant_max
        self.is_dynamic = self.activation_post_process.is_dynamic
        # Float-qparams schemes (embedding quant) carry a float zero_point.
        if _is_float_qparams(self.activation_post_process.qscheme):
            zero_point_dtype = torch.float
        else:
            zero_point_dtype = torch.int
        self.register_buffer('scale', torch.tensor([1.0], dtype=torch.float))
        self.register_buffer('zero_point', torch.tensor([0], dtype=zero_point_dtype))
        self.dtype = self.activation_post_process.dtype
        self.qscheme = self.activation_post_process.qscheme
        # Per-tensor observers have no ch_axis; -1 marks "not per-channel".
        self.ch_axis = self.activation_post_process.ch_axis \
            if hasattr(self.activation_post_process, 'ch_axis') else -1
        assert _is_per_channel(self.qscheme) or \
            _is_per_tensor(self.qscheme), \
            'Only per channel and per tensor quantization are supported in fake quantize' + \
            ' got qscheme: ' + str(self.qscheme)
        self.is_per_channel = _is_per_channel(self.qscheme)

    @torch.jit.export
    def calculate_qparams(self):
        """Delegate qparam computation to the wrapped observer."""
        return self.activation_post_process.calculate_qparams()

    def forward(self, X):
        """Observe ``X`` (if observation enabled) and return fake-quantized ``X`` (if enabled)."""
        if self.observer_enabled[0] == 1:
            # Detach so observation never contributes gradients.
            self.activation_post_process(X.detach())
            _scale, _zero_point = self.calculate_qparams()
            _scale, _zero_point = _scale.to(self.scale.device), _zero_point.to(self.zero_point.device)
            # Per-channel qparams can change shape as channels are discovered.
            if self.scale.shape != _scale.shape:
                self.scale.resize_(_scale.shape)
                self.zero_point.resize_(_zero_point.shape)
            self.scale.copy_(_scale)
            self.zero_point.copy_(_zero_point)

        if self.fake_quant_enabled[0] == 1:
            if self.is_per_channel:
                X = torch.fake_quantize_per_channel_affine(
                    X, self.scale, self.zero_point,
                    self.ch_axis, self.activation_post_process.quant_min, self.activation_post_process.quant_max)
            else:
                X = torch.fake_quantize_per_tensor_affine(
                    X, self.scale, self.zero_point,
                    self.activation_post_process.quant_min, self.activation_post_process.quant_max)
        return X

    @torch.jit.export
    def extra_repr(self):
        """Summarize flags, qparams and quantization config for repr()."""
        return f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, ' \
               f'quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, ' \
               f'dtype={self.dtype}, qscheme={self.qscheme}, ch_axis={self.ch_axis}, ' \
               f'scale={self.scale}, zero_point={self.zero_point}'

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # We cannot currently register scalar values as buffers, so need to manually
        # specify serialization here.
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'scale'] = self.scale
        destination[prefix + 'zero_point'] = self.zero_point

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Removing this function throws an error that the size of the loaded tensor does not match the original size
        # i.e., These buffers start out with numel 0 and become numel 1 once they have their first forward pass.
        local_state = ['scale', 'zero_point']
        for name in local_state:
            key = prefix + name
            if key in state_dict:
                val = state_dict[key]
                # Custom handling to allow loading scale and zero_point
                # of size N into uninitialized buffers of size 0. The
                # buffers are resized here, and the values are copied in
                # the default state_dict loading code of the parent.
                if name == 'scale':
                    self.scale.resize_(val.shape)
                else:
                    assert name == 'zero_point'
                    self.zero_point.resize_(val.shape)
                # For torchscript module we need to update the attributes here since we do not
                # call the `_load_from_state_dict` function defined module.py
                if torch.jit.is_scripting():
                    if name == 'scale':
                        self.scale.copy_(val)
                    else:
                        assert name == 'zero_point'
                        self.zero_point.copy_(val)
            elif strict:
                missing_keys.append(key)
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                      missing_keys, unexpected_keys, error_msgs)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class FixedQParamsFakeQuantize(FakeQuantize):
    """Simulate quantize and dequantize in training time.

    Simulate quantize and dequantize with fixed quantization
    parameters in training time. Only per tensor quantization
    is supported.
    """

    # TODO: rename observer to observer_ctr
    def __init__(self, observer):
        super().__init__(observer=observer)
        assert type(self.activation_post_process) == FixedQParamsObserver, \
            f"{self.__class__.__name__}'s observer must be a {FixedQParamsObserver.__name__}"
        self._observer_ctr = observer
        # Mirror the observer's fixed qparams directly onto this module.
        self.scale = self.activation_post_process.scale
        self.zero_point = self.activation_post_process.zero_point
        assert _is_per_tensor(self.qscheme), (
            'Only per tensor quantization is supported'
            f' FixedQParamsFakeQuantize module, got qscheme:{self.qscheme}'
        )

    @torch.jit.export
    def calculate_qparams(self):
        """Return the fixed (scale, zero_point) pair; never recomputed."""
        return self.scale, self.zero_point

    @torch.jit.export
    def extra_repr(self):
        """Define a string representation of the object's attributes."""
        return (
            f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, '
            f'scale={self.scale}, zero_point={self.zero_point}, '
            f'dtype={self.dtype}, quant_min={self.activation_post_process.quant_min}, '
            f'quant_max={self.activation_post_process.quant_max}, qscheme={self.qscheme}'
        )
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
class FusedMovingAvgObsFakeQuantize(FakeQuantize):
    r"""Define a fused module to observe the tensor.

    Fused module that is used to observe the input tensor (compute min/max), compute
    scale/zero_point and fake_quantize the tensor.
    This module uses calculation similar MovingAverageMinMaxObserver for the inputs,
    to compute the min/max values in order to compute the scale/zero_point.
    The qscheme input in the observer is used to differentiate between symmetric/affine
    quantization scheme.

    The output of this module is given by
    x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale

    Similar to :class:`~torch.ao.quantization.FakeQuantize`, and accepts the same attributes as the
    base class.

    """

    def __init__(
        self,
        observer: Any = MovingAverageMinMaxObserver,
        quant_min: int = 0,
        quant_max: int = 255,
        **observer_kwargs: Any
    ) -> None:
        """Build the observer via the base class, then adapt state for the fused op."""
        super().__init__(observer, quant_min, quant_max, **observer_kwargs)
        assert isinstance(self.activation_post_process, (MovingAverageMinMaxObserver, MovingAveragePerChannelMinMaxObserver)), \
            "Fused observer+fake_quant module only works with MovingAverageMinMaxObserver"
        # Re-register the enable flags with dtype long, overriding the uint8
        # buffers from the base class.
        # NOTE(review): presumably torch.fused_moving_avg_obs_fake_quant
        # requires int64 flag tensors — confirm against the op's signature.
        self.register_buffer("fake_quant_enabled", torch.tensor([1], dtype=torch.long))
        self.register_buffer("observer_enabled", torch.tensor([1], dtype=torch.long))
        # Cached as a plain bool so forward() can pass it to the fused op.
        self.is_symmetric_quant = _is_symmetric_quant(self.activation_post_process.qscheme)

    @torch.jit.export
    def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """Delegate qparam computation to the wrapped observer."""
        return self.activation_post_process.calculate_qparams()

    @torch.jit.export
    def extra_repr(self) -> str:
        """Summarize flags, qparams and quantization config for repr()."""
        return (
            f"fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, "
            f"scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}, "
            f"quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, "
            f"qscheme={self.qscheme}, reduce_range={self.activation_post_process.reduce_range}"
        )

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        """Run observation + fake quantization in a single fused kernel call.

        Updates the observer's min/max and this module's scale/zero_point
        in-place (subject to the enable flags) and returns the fake-quantized
        tensor. Argument order matches the fused op's positional signature.
        """
        return torch.fused_moving_avg_obs_fake_quant(
            X,
            self.observer_enabled,
            self.fake_quant_enabled,
            self.activation_post_process.min_val,
            self.activation_post_process.max_val,
            self.scale,
            self.zero_point,
            self.activation_post_process.averaging_constant,
            self.activation_post_process.quant_min,
            self.activation_post_process.quant_max,
            self.ch_axis,
            self.is_per_channel,
            self.is_symmetric_quant,
        )
|
| 358 |
+
|
| 359 |
+
# --- Preconfigured fake-quant constructors -------------------------------
# Each `with_args(...)` call returns a constructor partial; instantiate it
# (e.g. `default_fake_quant()`) to get a module.
default_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
                                            dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
"""
Default fake_quant for activations.
"""

default_weight_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127,
                                                   dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
"""
Default fake_quant for weights.
Observer is memoryless since averaging_constant is 1.
"""

default_dynamic_fake_quant = FakeQuantize.with_args(
    observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, is_dynamic=True,
    dtype=torch.quint8, averaging_constant=1)
"""
Default dynamic fake_quant for activations.
"""

default_fixed_qparams_range_neg1to1_fake_quant = (
    FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_neg1to1_observer)
)
default_fixed_qparams_range_0to1_fake_quant = (
    FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_0to1_observer)
)
# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases
default_symmetric_fixed_qparams_fake_quant = default_fixed_qparams_range_neg1to1_fake_quant
default_affine_fixed_qparams_fake_quant = default_fixed_qparams_range_0to1_fake_quant

default_per_channel_weight_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                                               quant_min=-128,
                                                               quant_max=127,
                                                               dtype=torch.qint8,
                                                               qscheme=torch.per_channel_symmetric,
                                                               reduce_range=False,
                                                               ch_axis=0)
"""
Default fake_quant for per-channel weights.
Observer is memoryless since averaging_constant is 1.
"""
default_embedding_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                                      qscheme=torch.per_channel_affine_float_qparams,
                                                      dtype=torch.quint8,
                                                      quant_min=0,
                                                      quant_max=255,
                                                      ch_axis=0,
                                                      averaging_constant=1)
"""
Default fake_quant for embeddings.
Observer is memoryless since averaging_constant is 1.
"""

# 4-bit variant of the embedding fake-quant (torch.quint4x2 packs two
# 4-bit values per byte).
default_embedding_fake_quant_4bit = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                                           qscheme=torch.per_channel_affine_float_qparams,
                                                           ch_axis=0,
                                                           dtype=torch.quint4x2,
                                                           averaging_constant=1)

default_histogram_fake_quant = FakeQuantize.with_args(observer=HistogramObserver,
                                                      quant_min=0,
                                                      quant_max=255,
                                                      dtype=torch.quint8,
                                                      qscheme=torch.per_tensor_affine,
                                                      reduce_range=True)
"""
Fake_quant for activations using a histogram.
"""


default_fused_act_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                       quant_min=0,
                                                                       quant_max=255,
                                                                       dtype=torch.quint8,)

"""
Fused version of `default_fake_quant`, with improved performance.
"""


default_fused_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                      quant_min=-128,
                                                                      quant_max=127,
                                                                      dtype=torch.qint8,
                                                                      qscheme=torch.per_tensor_symmetric)
"""
Fused version of `default_weight_fake_quant`, with improved performance.
"""

default_fused_per_channel_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                                                                  quant_min=-128,
                                                                                  quant_max=127,
                                                                                  dtype=torch.qint8,
                                                                                  qscheme=torch.per_channel_symmetric)
"""
Fused version of `default_per_channel_weight_fake_quant`, with improved performance.
"""

fused_wt_fake_quant_range_neg_127_to_127 = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                   quant_min=-127,
                                                                                   quant_max=127,
                                                                                   dtype=torch.qint8,
                                                                                   qscheme=torch.per_tensor_symmetric,
                                                                                   eps=2 ** -12)
"""
Fused version of `default_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128.
"""

fused_per_channel_wt_fake_quant_range_neg_127_to_127 = \
    FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                            quant_min=-127,
                                            quant_max=127,
                                            dtype=torch.qint8,
                                            qscheme=torch.per_channel_symmetric,
                                            eps=2 ** -12)

"""
Fused version of `default_per_channel_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128.
"""
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def _is_fake_quant_script_module(mod):
|
| 481 |
+
"""Return true if given mod is an instance of FakeQuantize script module."""
|
| 482 |
+
if isinstance(mod, torch.jit.RecursiveScriptModule):
|
| 483 |
+
# qualified name looks like '__torch__.torch.ao.quantization.fake_quantize.___torch_mangle_2.FakeQuantize'
|
| 484 |
+
suffix = mod._c.qualified_name.split('.', 1)[1]
|
| 485 |
+
name = re.sub(r'\.___torch_mangle_\d+', '', suffix)
|
| 486 |
+
return name == 'torch.ao.quantization.fake_quantize.FakeQuantize' or \
|
| 487 |
+
name == 'torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize'
|
| 488 |
+
return False
|
| 489 |
+
|
| 490 |
+
def disable_fake_quant(mod):
    """Disable fake quantization for the module.

    Disable fake quantization for this module, if applicable. Example usage::

        # model is any PyTorch model
        model.apply(torch.ao.quantization.disable_fake_quant)

    """
    applicable = isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod)
    if applicable:
        mod.disable_fake_quant()
|
| 501 |
+
|
| 502 |
+
def enable_fake_quant(mod):
    """Enable fake quantization for the module.

    Enable fake quantization for this module, if applicable. Example usage::

        # model is any PyTorch model
        model.apply(torch.ao.quantization.enable_fake_quant)

    """
    applicable = isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod)
    if applicable:
        mod.enable_fake_quant()
|
| 513 |
+
|
| 514 |
+
def disable_observer(mod):
    """Disable observation for this module.

    Disable observation for this module, if applicable. Example usage::

        # model is any PyTorch model
        model.apply(torch.ao.quantization.disable_observer)

    """
    supported = isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod)
    if supported:
        mod.disable_observer()
|
| 525 |
+
|
| 526 |
+
def enable_observer(mod):
    """Enable observation for this module.

    Enable observation for this module, if applicable. Example usage::

        # model is any PyTorch model
        model.apply(torch.ao.quantization.enable_observer)

    """
    if not (isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod)):
        return
    mod.enable_observer()
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.ao.nn.intrinsic as nni
|
| 4 |
+
|
| 5 |
+
from typing import Any, Union, Callable, List, Tuple, Dict, Optional, Type
|
| 6 |
+
from torch.ao.quantization.utils import Pattern, get_combined_dict, MatchAllNode
|
| 7 |
+
import itertools
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
"fuse_conv_bn",
|
| 11 |
+
"fuse_conv_bn_relu",
|
| 12 |
+
"fuse_linear_bn",
|
| 13 |
+
"fuse_convtranspose_bn",
|
| 14 |
+
"get_fuser_method",
|
| 15 |
+
"get_fuser_method_new",
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
def fuse_conv_bn(is_qat, conv, bn):
    r"""Return the fused conv and bn modules.
    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
            or post training quantization fusion
        conv: Module instance of type conv1d/conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn(False, m1, b1)
    """
    assert conv.training == bn.training, \
        "Conv and BN both must be in the same mode (train or eval)."

    # Maps a float conv type to the intrinsic fused Conv+BN module used for QAT.
    fused_module_class_map = {
        nn.Conv1d: nni.ConvBn1d,
        nn.Conv2d: nni.ConvBn2d,
        nn.Conv3d: nni.ConvBn3d,
    }

    if is_qat:
        # QAT keeps BN inside the fused module (folded later at convert time),
        # so BN must be affine and tracking running stats.
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        fused_module_class = fused_module_class_map.get(type(conv), None)
        if fused_module_class is not None:
            return fused_module_class(conv, bn)
        else:
            raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn)}")
    else:
        # PTQ: fold the BN statistics directly into the conv weights/bias.
        return nn.utils.fuse_conv_bn_eval(conv, bn)
|
| 55 |
+
|
| 56 |
+
def fuse_conv_bn_relu(is_qat, conv, bn, relu):
    r"""Return the fused conv and bn modules.

    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
            or post training quantization fusion
        conv: Module instance of type conv1d/conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv
        relu: ReLU instance that follows the batch norm

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> r1 = nn.ReLU(inplace=False)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn_relu(False, m1, b1, r1)
    """
    assert conv.training == bn.training == relu.training, \
        "Conv and BN both must be in the same mode (train or eval)."
    fused_module : Optional[Type[nn.Sequential]] = None
    if is_qat:
        # QAT keeps BN inside the fused module; it is folded at convert time.
        map_to_fused_module_train = {
            nn.Conv1d: nni.ConvBnReLU1d,
            nn.Conv2d: nni.ConvBnReLU2d,
            nn.Conv3d: nni.ConvBnReLU3d,
        }
        assert bn.num_features == conv.out_channels, 'Output channel of Conv must match num_features of BatchNorm'
        assert bn.affine, 'Only support fusing BatchNorm with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm with tracking_running_stats set to True'
        fused_module = map_to_fused_module_train.get(type(conv), None)
        if fused_module is not None:
            return fused_module(conv, bn, relu)
        else:
            raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, relu)}")
    else:
        # PTQ: fold BN into the conv first, then pair the folded conv with ReLU.
        map_to_fused_module_eval = {
            nn.Conv1d: nni.ConvReLU1d,
            nn.Conv2d: nni.ConvReLU2d,
            nn.Conv3d: nni.ConvReLU3d,
        }
        fused_module = map_to_fused_module_eval.get(type(conv), None)
        if fused_module is not None:
            fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
            return fused_module(fused_conv, relu)
        else:
            raise NotImplementedError(f"Cannot fuse eval modules: {(conv, bn, relu)}")
|
| 104 |
+
|
| 105 |
+
def fuse_linear_bn(is_qat, linear, bn):
    r"""Return the fused linear and bn modules.
    Given the linear and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
            or post training quantization fusion
        linear: Module instance of type Linear
        bn: BatchNorm1d instance that needs to be fused with the linear layer

    Examples::

        >>> m1 = nn.Linear(20, 10)
        >>> b1 = nn.BatchNorm1d(10)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_linear_bn(False, m1, b1)
    """
    assert linear.training == bn.training, \
        "Linear and BN both must be in the same mode (train or eval)."

    if is_qat:
        # QAT keeps BN inside the fused module, so it must be affine and
        # tracking running stats to be foldable later.
        assert bn.num_features == linear.out_features, \
            "Output features of Linear must match num_features of BatchNorm1d"
        assert bn.affine, "Only support fusing BatchNorm1d with affine set to True"
        assert bn.track_running_stats, \
            "Only support fusing BatchNorm1d with tracking_running_stats set to True"
        return nni.LinearBn1d(linear, bn)
    else:
        # PTQ: fold BN statistics into the linear weights/bias immediately.
        return nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
|
| 134 |
+
|
| 135 |
+
def fuse_convtranspose_bn(is_qat, convt, bn):
    r"""Return the fused ConvTranspose and bn modules.
    Given ConvTranspose and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
            or post training quantization fusion
        convt: Module instance of type ConvTransposeNd
        bn: BatchNormNd instance that needs to be fused with the linear layer.
            batch norm N should match the ConvTranspose N

    Examples::

        >>> m1 = nn.ConvTranspose2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_convtranspose_bn(False, m1, b1)
    """
    assert convt.training == bn.training, \
        "ConvTranspose and BN both must be in the same mode (train or eval)."

    if is_qat:
        raise Exception("Fusing ConvTranspose+BatchNorm not yet supported in QAT.")  # noqa: TRY002
    else:
        # transpose=True tells the folding helper that the weight layout is
        # (in_channels, out_channels, ...), as used by ConvTransposeNd.
        return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True)
|
| 158 |
+
|
| 159 |
+
def _sequential_wrapper2(sequential):
|
| 160 |
+
"""Return a sequential wrapped that for is_qat and two modules.
|
| 161 |
+
Given a sequential class for two modules, return a function that takes
|
| 162 |
+
is_qat, and then two modules as argument, that ignores the is_qat flag
|
| 163 |
+
and always returns the sequential that combines the two input modules
|
| 164 |
+
"""
|
| 165 |
+
def fuser_method(is_qat, m1, m2):
|
| 166 |
+
return sequential(m1, m2)
|
| 167 |
+
return fuser_method
|
| 168 |
+
|
| 169 |
+
# Default mapping from a tuple of float module types (in application order)
# to the fuser method that combines them into a single fused module.
_DEFAULT_OP_LIST_TO_FUSER_METHOD: Dict[Tuple, Union[nn.Sequential, Callable]] = {
    # Conv + BN (+ ReLU) patterns: BN is folded (PTQ) or kept for QAT.
    (nn.Conv1d, nn.BatchNorm1d): fuse_conv_bn,
    (nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu,
    (nn.Conv2d, nn.BatchNorm2d): fuse_conv_bn,
    (nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu,
    (nn.Conv3d, nn.BatchNorm3d): fuse_conv_bn,
    (nn.Conv3d, nn.BatchNorm3d, nn.ReLU): fuse_conv_bn_relu,
    # Two-module patterns fused by simply wrapping them in an intrinsic container.
    (nn.Conv1d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU1d),
    (nn.Conv2d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU2d),
    (nn.Conv3d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU3d),
    (nn.Linear, nn.BatchNorm1d): fuse_linear_bn,
    (nn.Linear, nn.ReLU): _sequential_wrapper2(nni.LinearReLU),
    (nn.BatchNorm2d, nn.ReLU): _sequential_wrapper2(nni.BNReLU2d),
    (nn.BatchNorm3d, nn.ReLU): _sequential_wrapper2(nni.BNReLU3d),
    # ConvTranspose + BN: eval-only folding (QAT raises).
    (nn.ConvTranspose1d, nn.BatchNorm1d): fuse_convtranspose_bn,
    (nn.ConvTranspose2d, nn.BatchNorm2d): fuse_convtranspose_bn,
    (nn.ConvTranspose3d, nn.BatchNorm3d): fuse_convtranspose_bn,
}
|
| 187 |
+
|
| 188 |
+
def get_fuser_method(op_list, additional_fuser_method_mapping=None):
    """Get fuser method for the given list of module types.

    Looks up ``op_list`` in the default fuser-method table, optionally
    overridden/extended by ``additional_fuser_method_mapping``.

    Note: despite earlier docs claiming a ``None`` return, this raises an
    ``AssertionError`` when no fuser method is registered for ``op_list``.
    """
    if additional_fuser_method_mapping is None:
        additional_fuser_method_mapping = {}
    # Additional mappings take precedence over the defaults.
    all_mappings = get_combined_dict(_DEFAULT_OP_LIST_TO_FUSER_METHOD,
                                     additional_fuser_method_mapping)
    fuser_method = all_mappings.get(op_list, None)
    assert fuser_method is not None, f"did not find fuser method for: {op_list} "
    return fuser_method
|
| 201 |
+
|
| 202 |
+
def _reverse2(f):
|
| 203 |
+
def reversed(is_qat, x, y):
|
| 204 |
+
return f(is_qat, y, x)
|
| 205 |
+
return reversed
|
| 206 |
+
|
| 207 |
+
def _reverse3(f):
|
| 208 |
+
def reversed(is_qat, x, w):
|
| 209 |
+
y, z = w
|
| 210 |
+
return f(is_qat, z, y, x)
|
| 211 |
+
return reversed
|
| 212 |
+
|
| 213 |
+
def _get_valid_patterns(op_pattern):
|
| 214 |
+
"""Return a list of valid patterns generated from the op_pattern.
|
| 215 |
+
|
| 216 |
+
Returns a list of valid patterns generated from the op_pattern,
|
| 217 |
+
since MatchAllNode can match all types of nodes,
|
| 218 |
+
e.g. pattern (torch.nn.Conv2d, torch.add) should also be able to match keys like
|
| 219 |
+
(MatchAllNode, torch.add) and (torch.nn.Conv2d, MatchAllNode)
|
| 220 |
+
|
| 221 |
+
Example Input:
|
| 222 |
+
(torch.add, (torch.nn.ReLU, torch.nn.Conv2d))
|
| 223 |
+
|
| 224 |
+
Example Output:
|
| 225 |
+
[(torch.add, (torch.nn.ReLU, torch.nn.Conv2d)),
|
| 226 |
+
(torch.add, (torch.nn.ReLU, MatchAllNode)),
|
| 227 |
+
(torch.add, (MatchAllNode, torch.nn.Conv2d)),
|
| 228 |
+
(torch.add, (MatchAllNode, MatchAllNode)),
|
| 229 |
+
(MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d)),
|
| 230 |
+
(MatchAllNode, (torch.nn.ReLU, MatchAllNode)),
|
| 231 |
+
(MatchAllNode, (MatchAllNode, torch.nn.Conv2d)),
|
| 232 |
+
(MatchAllNode, (MatchAllNode, MatchAllNode)),
|
| 233 |
+
]
|
| 234 |
+
"""
|
| 235 |
+
result: List[Any]
|
| 236 |
+
if isinstance(op_pattern, (tuple, list)):
|
| 237 |
+
sub_combs = []
|
| 238 |
+
for sub_pattern in op_pattern:
|
| 239 |
+
sub_combs.append(_get_valid_patterns(sub_pattern))
|
| 240 |
+
result = list(itertools.product(*sub_combs))
|
| 241 |
+
else:
|
| 242 |
+
result = [op_pattern, MatchAllNode]
|
| 243 |
+
return result
|
| 244 |
+
|
| 245 |
+
def get_fuser_method_new(
        op_pattern: Pattern,
        fuser_method_mapping: Dict[Pattern, Union[nn.Sequential, Callable]]):
    """Get fuser method.

    This will be made default after we deprecate the get_fuser_method
    Would like to implement this first and have a separate PR for deprecation

    Raises an AssertionError when no registered pattern (including
    MatchAllNode-wildcarded variants) matches ``op_pattern``.
    """
    op_patterns = _get_valid_patterns(op_pattern)
    fuser_method = None
    # Use a distinct loop variable: the original reused `op_pattern`, clobbering
    # the argument so the failure message below reported the last candidate
    # pattern tried instead of the caller's pattern.
    for candidate in op_patterns:
        fuser_method = fuser_method_mapping.get(candidate, None)
        if fuser_method is not None:
            break
    assert fuser_method is not None, f"did not find fuser method for: {op_pattern} "
    return fuser_method
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py
ADDED
|
@@ -0,0 +1,1033 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import math
|
| 3 |
+
from typing import Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch._refs import _unsqueeze_multiple
|
| 7 |
+
from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
|
| 8 |
+
from torch.library import impl, Library
|
| 9 |
+
|
| 10 |
+
# Note: decomposed means decomposed quantized tensor, using decomposed so that the
# name is not too long
quantized_decomposed_lib = Library("quantized_decomposed", "DEF")

# Integer storage dtypes supported for quantized values.
_INTEGER_DTYPES = [torch.uint8, torch.int8, torch.int16, torch.int32]
# Low-precision float storage dtypes supported for quantized values.
_FLOAT_DTYPES = [torch.float8_e5m2, torch.float8_e4m3fn]

# Map each supported dtype to its (min, max) representable quantized value range;
# used by _quant_min_max_bounds_check to validate quant_min/quant_max arguments.
_DTYPE_TO_QVALUE_BOUNDS = {k : (torch.iinfo(k).min, torch.iinfo(k).max) for k in _INTEGER_DTYPES}
_DTYPE_TO_QVALUE_BOUNDS.update({k : (int(torch.finfo(k).min), int(torch.finfo(k).max)) for k in _FLOAT_DTYPES})
|
| 19 |
+
|
| 20 |
+
# Helper to check the passed in quant min and max are valid for the dtype
|
| 21 |
+
def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
|
| 22 |
+
if dtype not in _DTYPE_TO_QVALUE_BOUNDS:
|
| 23 |
+
raise ValueError(f"Unsupported dtype: {dtype}")
|
| 24 |
+
quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype]
|
| 25 |
+
|
| 26 |
+
assert quant_min >= quant_min_lower_bound, \
|
| 27 |
+
"quant_min out of bound for dtype, " \
|
| 28 |
+
f"quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}"
|
| 29 |
+
|
| 30 |
+
assert quant_max <= quant_max_upper_bound, \
|
| 31 |
+
"quant_max out of bound for dtype, " \
|
| 32 |
+
f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
|
| 33 |
+
|
| 34 |
+
# Schema for the per-tensor affine quantize op registered below.
quantized_decomposed_lib.define(
    "quantize_per_tensor(Tensor input, float scale, int zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
def quantize_per_tensor(
        input: torch.Tensor,
        scale: float,
        zero_point: int,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype
) -> torch.Tensor:
    """ Affine quantization for the Tensor using the same quantization parameters to map
    from floating point to quantized values

    Args:
       input (torch.Tensor): original float32 or bfloat16 Tensor
       scale (float): quantization parameter for affine quantization
       zero_point (int): quantization parameter for affine quantization
       quant_min (int): minimum quantized value for output Tensor
       quant_max (int): maximum quantized value for output Tensor
       dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
       Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters
       are not stored in the Tensor, we are storing them in function arguments instead
    """
    # Half-precision inputs are upcast so the quantization math runs in float32.
    if input.dtype in [torch.float16, torch.bfloat16]:
        input = input.to(torch.float32)
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)

    # q = clamp(round(x / scale) + zero_point, quant_min, quant_max), computed
    # via the reciprocal; clamping happens before the final dtype cast.
    inv_scale = 1.0 / scale
    return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
|
| 69 |
+
|
| 70 |
+
@impl(quantized_decomposed_lib, "quantize_per_tensor", "Meta")
def quantize_per_tensor_meta(
        input: torch.Tensor,
        scale: float,
        zero_point: int,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype
) -> torch.Tensor:
    """Meta kernel for quantize_per_tensor: propagates shape and output dtype only.

    Mirrors the fp16/bf16 upcast and float32 check of the real kernel so meta
    tracing fails on the same invalid inputs; no data is computed.
    """
    if input.dtype in [torch.float16, torch.bfloat16]:
        input = input.to(torch.float32)
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    return torch.empty_like(input, dtype=dtype)
|
| 83 |
+
|
| 84 |
+
# Overload where scale/zero_point arrive as 0-d Tensors rather than scalars.
quantized_decomposed_lib.define(
    "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
def quantize_per_tensor_tensor(
        input: torch.Tensor,
        scale: torch.Tensor,
        zero_point: torch.Tensor,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype
) -> torch.Tensor:
    """ Affine quantization for the Tensor using the same quantization parameters to map
    from floating point to quantized values
    Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of
    scalar values
    """
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    # Unwrap the one-element tensors and delegate to the scalar overload.
    return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
|
| 105 |
+
|
| 106 |
+
@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
def quantize_per_tensor_tensor_meta(
        input: torch.Tensor,
        scale: torch.Tensor,
        zero_point: torch.Tensor,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype
) -> torch.Tensor:
    """Meta kernel for quantize_per_tensor.tensor: shape/dtype propagation only.

    Validates the one-element scale/zero_point tensors like the real kernel,
    then returns an empty tensor of the requested quantized dtype.
    """
    if input.dtype in [torch.float16, torch.bfloat16]:
        input = input.to(torch.float32)
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    return torch.empty_like(input, dtype=dtype)
|
| 121 |
+
|
| 122 |
+
# TODO: remove other variants and keep this one
# Overload where every quantization parameter (scale, zero_point, quant_min,
# quant_max) arrives as a Tensor.
quantized_decomposed_lib.define(
    "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
    "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd")
def quantize_per_tensor_tensor2(
        input: torch.Tensor,
        scale: torch.Tensor,
        zero_point: torch.Tensor,
        quant_min: torch.Tensor,
        quant_max: torch.Tensor,
        dtype: torch.dtype
) -> torch.Tensor:
    """ Affine quantization for the Tensor using the same quantization parameters to map
    from floating point to quantized values
    Same as `quantize_per_tensor` but scale, zero_point, quant_min and quant_max
    are Scalar Tensors instead of scalar values
    """
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    # Unwrap all one-element tensors and delegate to the scalar overload.
    return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
|
| 144 |
+
|
| 145 |
+
@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta")
def quantize_per_tensor_tensor2_meta(
        input: torch.Tensor,
        scale: torch.Tensor,
        zero_point: torch.Tensor,
        quant_min: torch.Tensor,
        quant_max: torch.Tensor,
        dtype: torch.dtype
) -> torch.Tensor:
    """Meta kernel for quantize_per_tensor.tensor2: delegates shape propagation.

    The quant_min/quant_max tensors are forwarded but unused by the delegate,
    which only propagates input shape and output dtype.
    """
    return quantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype)
|
| 155 |
+
|
| 156 |
+
# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
quantized_decomposed_lib.define(
    "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
    "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor(
        input: torch.Tensor,
        scale: float,
        zero_point: int,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype,
        *,
        out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """ Affine dequantization for the Tensor using the same quantization parameters to map
    from quantized values to floating point values

    Args:
       input (torch.Tensor): Tensor with dtype matching `dtype` argument,
       e.g. (`torch.uint8`), it is a per tensor quantized Tensor if combined with
       quantization parameters in the argument of this function (scale/zero_point)

       scale (float): quantization parameter for affine quantization

       zero_point (int): quantization parameter for affine quantization

       quant_min (int): minimum quantized value for input Tensor (not used in computation,
       reserved for pattern matching)

       quant_max (int): maximum quantized value for input Tensor (not used in computation,
       reserved for pattern matching)

       dtype (torch.dtype): dtype for input Tensor (not used in computation,
       reserved for pattern matching)

       out_dtype (torch.dtype?): optional dtype for output Tensor,
       defaults to torch.float32 when not given

    Returns:
       dequantized float32 Tensor
    """
    assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}, but got {input.dtype}"
    if out_dtype is None:
        out_dtype = torch.float32
    if dtype in _DTYPE_TO_QVALUE_BOUNDS:
        # TODO: investigate why
        # (input - zero_point).to(torch.float32) * scale
        # failed the test
        # NOTE: the cast-before-subtract order below is load-bearing; do not
        # reorder these ops (see the failed-variant note above).
        return (input.to(out_dtype) - zero_point) * scale
    else:
        raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
|
| 211 |
+
|
| 212 |
+
@impl(quantized_decomposed_lib, "dequantize_per_tensor", "Meta")
def dequantize_per_tensor_meta(
        input: torch.Tensor,
        # Annotations corrected to match the registered schema
        # ("float scale, int zero_point"); they previously claimed Tensor.
        scale: float,
        zero_point: int,
        quant_min: int,
        quant_max: int,
        dtype: torch.dtype,
        *,
        out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Meta kernel for dequantize_per_tensor: returns an empty tensor with the
    output (float) dtype, defaulting to torch.float32."""
    if out_dtype is None:
        out_dtype = torch.float32
    return torch.empty_like(input, dtype=out_dtype)
|
| 226 |
+
|
| 227 |
+
quantized_decomposed_lib.define(
    "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
    "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor_tensor(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Affine per-tensor dequantization, mapping quantized values back to
    floating point.

    Identical to `dequantize_per_tensor` except that `scale` and
    `zero_point` arrive as one-element Tensors rather than Python scalars;
    they are unwrapped with `.item()` and forwarded.
    """
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    return dequantize_per_tensor(
        input,
        scale.item(),
        zero_point.item(),
        quant_min,
        quant_max,
        dtype,
        out_dtype=out_dtype,
    )
|
| 250 |
+
|
| 251 |
+
@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
def dequantize_per_tensor_tensor_meta(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Meta kernel for `dequantize_per_tensor.tensor`: validates arguments
    and allocates an output of the requested (default float32) dtype.
    """
    out_dtype = torch.float32 if out_dtype is None else out_dtype
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}"
    if dtype not in _DTYPE_TO_QVALUE_BOUNDS:
        raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
    return torch.empty_like(input, dtype=out_dtype)
|
| 271 |
+
|
| 272 |
+
# TODO: remove other variants and keep this one
quantized_decomposed_lib.define(
    "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
    "Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd")
def dequantize_per_tensor_tensor2(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: torch.Tensor,
    quant_max: torch.Tensor,
    dtype: torch.dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Affine per-tensor dequantization, mapping quantized values back to
    floating point.

    Same contract as `dequantize_per_tensor`, but every quantization
    parameter (scale, zero_point, quant_min, quant_max) is a scalar Tensor;
    each is unwrapped with `.item()` before delegating.
    """
    assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}"
    assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
    return dequantize_per_tensor(
        input,
        scale.item(),
        zero_point.item(),
        quant_min.item(),
        quant_max.item(),
        dtype,
        out_dtype=out_dtype,
    )
|
| 297 |
+
|
| 298 |
+
@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta")
def dequantize_per_tensor_tensor2_meta(
    input,
    scale,
    zero_point,
    quant_min,
    quant_max,
    dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Meta kernel for `dequantize_per_tensor.tensor2`.

    Delegates to the `.tensor` meta kernel, which ignores
    quant_min/quant_max for shape/dtype propagation anyway.
    """
    return dequantize_per_tensor_tensor_meta(
        input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype
    )
|
| 310 |
+
|
| 311 |
+
quantized_decomposed_lib.define(
    "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
    "float eps, ScalarType dtype) -> (Tensor, Tensor)")

@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
def choose_qparams_tensor(
    input: torch.Tensor,
    qmin: int,
    qmax: int,
    eps: float,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Derive per-tensor affine quantization parameters from an input Tensor.

    Args:
        input (torch.Tensor): floating point input Tensor
        qmin (int): minimum quantized value for target quantized Tensor
        qmax (int): maximum quantized value for target quantized Tensor
        eps (float): lower bound applied to the scale
        dtype (torch.dtype): dtype for target quantized Tensor

    Returns:
        (scale, zero_point) Tensors for the target quantized Tensor
    """
    assert input.dtype in [
        torch.float32,
        torch.float16,
        torch.bfloat16,
    ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}"
    assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \
        f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}"
    validate_qmin_qmax(qmin, qmax)

    # Per-tensor range: a single global min/max pair.
    min_val, max_val = torch.aminmax(input)

    return determine_qparams(
        min_val,
        max_val,
        qmin,
        qmax,
        dtype,
        torch.Tensor([eps]),
        has_customized_qrange=False,
    )
|
| 349 |
+
|
| 350 |
+
quantized_decomposed_lib.define(
    "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
    "float eps, ScalarType dtype) -> (Tensor, Tensor)")

@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
def choose_qparams_symmetric_tensor(
    input: torch.Tensor,
    qmin: int,
    qmax: int,
    eps: float,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Derive per-tensor *symmetric* affine quantization parameters from an
    input Tensor.

    Args:
        input (torch.Tensor): floating point input Tensor
        qmin (int): minimum quantized value for target quantized Tensor
        qmax (int): maximum quantized value for target quantized Tensor
        eps (float): lower bound applied to the scale
        dtype (torch.dtype): dtype for target quantized Tensor

    Returns:
        (scale, zero_point) Tensors for the target quantized Tensor
    """
    assert input.dtype in [
        torch.float32,
        torch.float16,
        torch.bfloat16,
    ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}"
    assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \
        f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}"
    validate_qmin_qmax(qmin, qmax)

    min_val, max_val = torch.aminmax(input)
    # Same as the asymmetric variant except for the symmetric qscheme.
    return determine_qparams(
        min_val,
        max_val,
        qmin,
        qmax,
        dtype,
        torch.Tensor([eps]),
        has_customized_qrange=False,
        qscheme=torch.per_tensor_symmetric
    )
|
| 395 |
+
|
| 396 |
+
@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
def choose_qparams_tensor_meta(
    input: torch.Tensor,
    quant_min: int,
    quant_max: int,
    eps: float,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Meta kernel for `choose_qparams.tensor`: validates arguments and
    allocates the (scale, zero_point) output pair — a float64 scale and an
    int64 zero_point, one element each.
    """
    assert input.dtype in [
        torch.float32,
        torch.float16,
        torch.bfloat16,
    ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}"
    assert quant_min < quant_max, f"Expecting quant_min to be smaller than quant_max but received min: \
        {quant_min} max: {quant_max}"
    scale = torch.empty(1, dtype=torch.double, device=input.device)
    zero_point = torch.empty(1, dtype=torch.int64, device=input.device)
    return scale, zero_point
|
| 412 |
+
|
| 413 |
+
@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
def choose_qparams_symmetric_tensor_meta(
    input: torch.Tensor,
    quant_min: int,
    quant_max: int,
    eps: float,
    dtype: torch.dtype
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Meta kernel for `choose_qparams_symmetric.tensor`: allocates the
    one-element (float64 scale, int64 zero_point) output pair.
    """
    scale = torch.empty(1, dtype=torch.double, device=input.device)
    zero_point = torch.empty(1, dtype=torch.int64, device=input.device)
    return scale, zero_point
|
| 422 |
+
|
| 423 |
+
# Helper function used to implement per-channel quantization against any axis
|
| 424 |
+
def _permute_to_axis_zero(x, axis):
|
| 425 |
+
new_axis_list = list(range(x.dim()))
|
| 426 |
+
new_axis_list[axis] = 0
|
| 427 |
+
new_axis_list[0] = axis
|
| 428 |
+
y = x.permute(tuple(new_axis_list))
|
| 429 |
+
return y, new_axis_list
|
| 430 |
+
|
| 431 |
+
quantized_decomposed_lib.define(
    "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor")

@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
def quantize_per_channel(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """Affine per-channel quantization: each slice along `axis` is mapped
    from floating point to quantized values with its own scale/zero_point.

    Args:
        input (torch.Tensor): original float32 or bfloat16 Tensor
        scales (torch.Tensor): per-channel scale quantization parameters
        zero_points (torch.Tensor): per-channel zero_point quantization parameters
        axis (int): channel axis the parameters are indexed along
        quant_min (int): minimum quantized value for output Tensor
        quant_max (int): maximum quantized value for output Tensor
        dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
        Tensor with the requested dtype; the quantization parameters are not
        stored in the Tensor, callers carry them separately.
    """
    if input.dtype in [torch.float16, torch.bfloat16]:
        input = input.to(torch.float32)
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    # Bring the channel axis to the front so each channel is input[ch].
    input, permute_axis_list = _permute_to_axis_zero(input, axis)
    res = torch.zeros_like(input)

    for ch in range(input.size(0)):
        scaled = torch.round(input[ch] * (1.0 / scales[ch])) + zero_points[ch]
        res[ch] = torch.clamp(scaled, quant_min, quant_max)

    # Undo the permutation (the swap list is its own inverse), then cast.
    return res.permute(tuple(permute_axis_list)).to(dtype)
|
| 479 |
+
|
| 480 |
+
@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
def quantize_per_channel_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype
) -> torch.Tensor:
    """Meta kernel for `quantize_per_channel`: runs the same validation as
    the eager kernel, then allocates an output of the target dtype.
    """
    if input.dtype in [torch.float16, torch.bfloat16]:
        input = input.to(torch.float32)
    assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=dtype)
|
| 496 |
+
|
| 497 |
+
# Note: quant_min/quant_max/dtype are not used by the computation; they stay
# in the signature as metadata about the input Tensor, which may be useful
# for pattern matching. Revisit if no use cases materialize.
quantized_decomposed_lib.define(
    "dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, "
    "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")

@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
def dequantize_per_channel(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: Optional[torch.Tensor],
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Affine per-channel dequantization: each slice along `axis` is mapped
    from quantized values back to floating point with its own
    scale/zero_point.

    Args:
        input (torch.Tensor): quantized Tensor whose dtype matches `dtype`
            (e.g. torch.uint8)
        scales (torch.Tensor): per-channel scale quantization parameters
        zero_points (torch.Tensor?): per-channel zero_point parameters;
            a missing tensor is treated as all-zero zero_points
        axis (int): channel axis the parameters are indexed along
        quant_min (int): minimum quantized value (metadata only, not used
            in computation)
        quant_max (int): maximum quantized value (metadata only, not used
            in computation)
        dtype (torch.dtype): input dtype (metadata only, validated against
            `input.dtype`)
        out_dtype (torch.dtype?): optional dtype for output Tensor,
            defaults to float32

    Returns:
        dequantized Tensor with dtype `out_dtype`
    """
    assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
    out_dtype = torch.float32 if out_dtype is None else out_dtype
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    # Bring the channel axis to the front so each channel is input[ch].
    input, permute_axis_list = _permute_to_axis_zero(input, axis)
    res = torch.zeros_like(input, dtype=out_dtype)

    for ch in range(input.size(0)):
        offset = zero_points[ch] if zero_points is not None else 0
        # TODO: investigate why
        # (input[i] - zero_points[i]).to(out_dtype) * scales[i]
        # failed the test
        res[ch] = (input[ch].to(out_dtype) - offset) * scales[ch]

    # Undo the permutation (the swap list is its own inverse).
    return res.permute(tuple(permute_axis_list))
|
| 562 |
+
|
| 563 |
+
@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
def dequantize_per_channel_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: Optional[torch.Tensor],
    axis: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    *,
    out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
    """Meta kernel for `dequantize_per_channel`: runs the same validation
    as the eager kernel, then allocates a float (default float32) output.
    """
    assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}"
    out_dtype = torch.float32 if out_dtype is None else out_dtype
    assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=out_dtype)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
quantized_decomposed_lib.define(
    "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
)


@impl(
    quantized_decomposed_lib,
    "choose_qparams_per_token",
    "CompositeExplicitAutograd",
)
def choose_qparams_per_token(
    input: torch.Tensor,
    dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Choose symmetric quantization parameters per token.

    For an N-dimensional Tensor (M1, M2, ... Mn, N) a scale/zero_point pair
    is computed for every run of N elements along the last dimension, so the
    qparams have one entry per token (last dim kept with size 1).

    Args:
        input (torch.Tensor): original float32/float16 Tensor
        dtype (torch.dtype): target quantized dtype; only torch.int8 is
            supported here

    Returns:
        scales and zero_points (zero_points are all zero — symmetric)
    """
    # Symmetric: scale from the per-token absolute maximum.
    scales = input.abs().amax(dim=-1, keepdim=True)
    if scales.dtype == torch.float16:
        # want float scales to avoid overflows for fp16 (bf16 has wide
        # enough range)
        scales = scales.float()
    if dtype != torch.int8:
        raise Exception(f"unsupported dtype in choose_qparams_per_token: {dtype}")  # noqa: TRY002
    n_bits = 8
    quant_max = 2 ** (n_bits - 1) - 1

    # Floor the scale so a zero row still yields a usable (non-zero) scale.
    scales = scales.clamp(min=1e-5).div(quant_max)
    zero_points = torch.zeros_like(scales)
    return scales, zero_points
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
@impl(
    quantized_decomposed_lib,
    "choose_qparams_per_token",
    "Meta",
)
def choose_qparams_per_token_meta(
    input: torch.Tensor,
    dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Meta kernel for `choose_qparams_per_token`.

    NOTE(review): the `(1, N)` shape here looks inconsistent with the eager
    kernel, which keeps one qparam per token (last dim reduced to 1), and
    with the float64/int64 dtypes vs the eager float32 outputs — confirm
    against downstream consumers before relying on the meta shapes.
    """
    size = (1, input.size(-1))
    scales = torch.empty(size, dtype=torch.double, device=input.device)
    zero_points = torch.empty(size, dtype=torch.int64, device=input.device)
    return scales, zero_points
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
quantized_decomposed_lib.define(
    "_choose_qparams_per_token_asymmetric_impl(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
)


@impl(
    quantized_decomposed_lib,
    "_choose_qparams_per_token_asymmetric_impl",
    "CompositeImplicitAutograd",
)
def _choose_qparams_per_token_asymmetric_impl(
    input: torch.Tensor,
    dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Choose asymmetric (int8-range) quantization parameters per token.

    For an N-dimensional Tensor (M1, M2, ... Mn, N) a scale/zero_point pair
    is computed for every run of N elements along the last dimension.

    Args:
        input (torch.Tensor): original float32/float16 Tensor
        dtype (torch.dtype): target dtype (not consulted; the qmin/qmax pair
            is fixed at the int8 range below)

    Returns:
        scales and zero_points, both float32 Tensors
    """
    # Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
    qmin, qmax = -128, 127
    min_val = torch.amin(input, dim=-1, keepdim=True)
    max_val = torch.amax(input, dim=-1, keepdim=True)
    # Force the quantization range to include 0.
    min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
    max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
    eps = torch.finfo(torch.float32).eps  # use xnnpack eps?

    # Scale, floored at eps so constant tokens don't divide by zero.
    scale = (max_val_pos - min_val_neg) / float(qmax - qmin)
    scale = scale.clamp(min=eps)

    # Zero point: pick whichever endpoint yields the smaller rounding error.
    descaled_min = min_val_neg / scale
    descaled_max = max_val_pos / scale
    err_from_min = qmin + descaled_min
    err_from_max = qmax + descaled_max
    zero_point = torch.where(
        err_from_min + err_from_max > 0,
        qmin - descaled_min,
        qmax - descaled_max,
    )
    zero_point = torch.clamp(zero_point, qmin, qmax).round()

    return scale.to(torch.float32), zero_point.to(torch.float32)
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
quantized_decomposed_lib.define(
    "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
)


@impl(
    quantized_decomposed_lib,
    "choose_qparams_per_token_asymmetric",
    "CompositeExplicitAutograd",
)
def choose_qparams_per_token_asymmetric(
    input: torch.Tensor,
    dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Public entry point for per-token asymmetric qparam selection; the
    actual math lives in `_choose_qparams_per_token_asymmetric_impl`.
    """
    return _choose_qparams_per_token_asymmetric_impl(input, dtype)
|
| 709 |
+
|
| 710 |
+
|
| 711 |
+
@impl(
    quantized_decomposed_lib,
    "choose_qparams_per_token_asymmetric",
    "Meta",
)
def choose_qparams_per_token_asymmetric_meta(
    input: torch.Tensor,
    dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Meta kernel for `choose_qparams_per_token_asymmetric`.

    NOTE(review): mirrors the `(1, N)` float64/int64 shape of
    `choose_qparams_per_token_meta`, which looks inconsistent with the
    per-token float32 outputs of the eager kernel — confirm before relying
    on the meta shapes.
    """
    size = (1, input.size(-1))
    scales = torch.empty(size, dtype=torch.double, device=input.device)
    zero_points = torch.empty(size, dtype=torch.int64, device=input.device)
    return scales, zero_points
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
def _per_token_quant_qparam_dim_check(input, scales, zero_points):
|
| 727 |
+
num_tokens = math.prod(list(input.size())[:-1])
|
| 728 |
+
assert (
|
| 729 |
+
num_tokens == scales.numel()
|
| 730 |
+
), f"num_tokens: {num_tokens} scales: {scales.size()}"
|
| 731 |
+
assert (
|
| 732 |
+
num_tokens == zero_points.numel()
|
| 733 |
+
), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}"
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
quantized_decomposed_lib.define(
    "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
    "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
)


@impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd")
def quantize_per_token(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
):
    """Per-token quantization mapping floating point to quantized values.

    For an N-dimensional Tensor (M1, M2, ... Mn, N), every run of N elements
    along the last dimension shares one scale/zero_point pair, so the
    qparams have M1 * M2 * ... * Mn entries.

    Args:
        input (torch.Tensor): original float32 or bfloat16 Tensor
        scales (float32 torch.Tensor): per-token affine scales
        zero_points (int32 torch.Tensor): per-token affine zero_points
        quant_min (int): minimum quantized value for output Tensor
        quant_max (int): maximum quantized value for output Tensor
        dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
        Tensor with the requested dtype; the quantization parameters are not
        stored in the Tensor, callers carry them separately.
    """
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    _per_token_quant_qparam_dim_check(input, scales, zero_points)
    quantized = input.mul(1.0 / scales).add(zero_points).round()
    return quantized.clamp(quant_min, quant_max).to(dtype)
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
@impl(quantized_decomposed_lib, "quantize_per_token", "Meta")
def quantize_per_token_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
):
    """Meta kernel for `quantize_per_token`: bounds-check the quant range
    and allocate an output of the target dtype.
    """
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    return torch.empty_like(input, dtype=dtype)
|
| 788 |
+
|
| 789 |
+
|
| 790 |
+
quantized_decomposed_lib.define(
    "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
    "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
)


@impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd")
def dequantize_per_token(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    output_dtype: torch.dtype = torch.float32,
):
    """Per-token dequantization mapping quantized values back to floating
    point.

    For an N-dimensional Tensor (M1, M2, ... Mn, N), every run of N elements
    along the last dimension shares one scale/zero_point pair, so the
    qparams have M1 * M2 * ... * Mn entries.

    Args:
        input (torch.Tensor): quantized Tensor (uint8, int8 etc.)
        scales (float32 torch.Tensor): per-token affine scales
        zero_points (int32 torch.Tensor): per-token affine zero_points
        quant_min (int): minimum quantized value for input Tensor
        quant_max (int): maximum quantized value for input Tensor
        dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
        output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor

    Returns:
        dequantized Tensor with dtype `output_dtype`
    """
    shifted = input - zero_points
    return shifted.to(output_dtype) * scales
|
| 827 |
+
|
| 828 |
+
|
| 829 |
+
@impl(quantized_decomposed_lib, "dequantize_per_token", "Meta")
def dequantize_per_token_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    output_dtype: torch.dtype = torch.float32,
):
    """Meta kernel for `dequantize_per_token`: bounds-check the quant range
    and allocate an output of `output_dtype`.
    """
    _quant_min_max_bounds_check(quant_min, quant_max, dtype)
    # TODO: support fp16
    return torch.empty_like(input, dtype=output_dtype)
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
quantized_decomposed_lib.define(
    "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, "
    "int quant_max, ScalarType dtype, int group_size) -> Tensor"
)


# TODO: dtype is ignored for now
@impl(
    quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd"
)
def quantize_per_channel_group(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    group_size=128,
):
    """Groupwise quantization within each row of a 2-d Tensor: every
    `group_size` consecutive elements of a row share one scale/zero_point.
    """
    assert group_size > 1
    # needed for GPTQ single column quantize
    if group_size > input.shape[-1] and scales.shape[-1] == 1:
        group_size = input.shape[-1]

    assert input.shape[-1] % group_size == 0
    assert input.dim() == 2

    # TODO: check for dtype, currently we can't express torch.int4 so it's omitted
    to_quant = input.reshape(-1, group_size)
    assert torch.isnan(to_quant).sum() == 0

    group_scales = scales.reshape(-1, 1)
    group_zeros = zero_points.reshape(-1, 1)

    quantized = to_quant.mul(1.0 / group_scales).add(group_zeros).round()
    input_int8 = quantized.clamp_(quant_min, quant_max).to(dtype).reshape_as(input)

    return input_int8
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
@impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta")
def quantize_per_channel_group_meta(
    input: torch.Tensor,
    scales: torch.Tensor,
    zero_points: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
    group_size=128,
):
    """Meta kernel for `quantize_per_channel_group`.

    Groupwise quantization within each channel of a 2-d Tensor (M, N):
    every `group_size` consecutive elements of a row share one
    scale/zero_point, so the qparams have M * ceil(N / group_size) entries.

    Args:
        input (torch.Tensor): original float32 or bfloat16 Tensor
        scales (float32 torch.Tensor): per-group affine scales
        zero_points (int32 torch.Tensor): per-group affine zero_points
        quant_min (int): minimum quantized value for output Tensor
        quant_max (int): maximum quantized value for output Tensor
        dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor

    Returns:
        empty Tensor with the requested dtype and input's shape
    """
    assert group_size > 1
    # needed for GPTQ single column quantize
    if group_size > input.shape[-1] and scales.shape[-1] == 1:
        group_size = input.shape[-1]

    assert input.shape[-1] % group_size == 0
    assert input.dim() == 2
    return torch.empty_like(input, dtype=dtype)
|
| 926 |
+
|
| 927 |
+
|
| 928 |
+
quantized_decomposed_lib.define(
|
| 929 |
+
"dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, "
|
| 930 |
+
"int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
|
| 931 |
+
)
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
@impl(
|
| 935 |
+
quantized_decomposed_lib,
|
| 936 |
+
"dequantize_per_channel_group",
|
| 937 |
+
"CompositeExplicitAutograd",
|
| 938 |
+
)
|
| 939 |
+
def dequantize_per_channel_group(
|
| 940 |
+
w_int8: torch.Tensor,
|
| 941 |
+
scales: torch.Tensor,
|
| 942 |
+
zero_points: Optional[torch.Tensor],
|
| 943 |
+
quant_min: int,
|
| 944 |
+
quant_max: int,
|
| 945 |
+
dtype: torch.dtype,
|
| 946 |
+
group_size: int = 128,
|
| 947 |
+
output_dtype: torch.dtype = torch.float32,
|
| 948 |
+
):
|
| 949 |
+
"""Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters
|
| 950 |
+
to map from floating point to quantized values. This means for each row of a 2-d Tensor
|
| 951 |
+
(M, N), we calculate scales/zero_points for each `group_size` elements
|
| 952 |
+
and quantize every `group_size` elements with the same quantization parameter.
|
| 953 |
+
The dimension for scales/zero_points will be (M * ceil(N, group_size),)
|
| 954 |
+
|
| 955 |
+
Args:
|
| 956 |
+
input (torch.Tensor): quantized Tensor (uint8/int8 etc.)
|
| 957 |
+
scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization
|
| 958 |
+
zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization
|
| 959 |
+
quant_min (int): minimum quantized value for input Tensor
|
| 960 |
+
quant_max (int): maximum quantized value for input Tensor
|
| 961 |
+
dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor
|
| 962 |
+
output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor
|
| 963 |
+
|
| 964 |
+
Returns:
|
| 965 |
+
dequantized Tensor with dtype `output_dtype`
|
| 966 |
+
"""
|
| 967 |
+
|
| 968 |
+
assert group_size > 1
|
| 969 |
+
# needed for GPTQ single column dequantize
|
| 970 |
+
if group_size > w_int8.shape[-1] and scales.shape[-1] == 1:
|
| 971 |
+
group_size = w_int8.shape[-1]
|
| 972 |
+
assert w_int8.shape[-1] % group_size == 0
|
| 973 |
+
assert w_int8.dim() == 2
|
| 974 |
+
|
| 975 |
+
w_int8_grouped = w_int8.reshape(-1, group_size)
|
| 976 |
+
scales = scales.reshape(-1, 1)
|
| 977 |
+
if zero_points is not None:
|
| 978 |
+
zp = zero_points.reshape(-1, 1)
|
| 979 |
+
else:
|
| 980 |
+
zp = torch.zeros([], dtype=torch.int32, device=scales.device)
|
| 981 |
+
w_dq = w_int8_grouped.sub(zp).mul(scales).reshape_as(w_int8).to(output_dtype)
|
| 982 |
+
return w_dq
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
quantized_decomposed_lib.define(
|
| 986 |
+
"fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
|
| 987 |
+
"int quant_min, int quant_max) -> Tensor")
|
| 988 |
+
|
| 989 |
+
class FakeQuantPerChannel(torch.autograd.Function):
|
| 990 |
+
@staticmethod
|
| 991 |
+
def forward(ctx, input, scales, zero_points, axis, quant_min, quant_max):
|
| 992 |
+
if scales.dtype != torch.float32:
|
| 993 |
+
scales = scales.to(torch.float32)
|
| 994 |
+
if zero_points.dtype != torch.int32:
|
| 995 |
+
zero_points = zero_points.to(torch.int32)
|
| 996 |
+
assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
|
| 997 |
+
assert axis < input.dim(), f"Expecting axis to be < {input.dim()}"
|
| 998 |
+
broadcast_dims = list(range(0, axis)) + list(range(axis + 1, input.ndim))
|
| 999 |
+
unsqueeze_scales = _unsqueeze_multiple(scales, broadcast_dims)
|
| 1000 |
+
unsqueeze_zero_points = _unsqueeze_multiple(zero_points, broadcast_dims)
|
| 1001 |
+
temp = torch.round(input * (1.0 / unsqueeze_scales)) + unsqueeze_zero_points
|
| 1002 |
+
out = (torch.clamp(temp, quant_min, quant_max) - unsqueeze_zero_points) * unsqueeze_scales
|
| 1003 |
+
mask = torch.logical_and((temp >= quant_min), (temp <= quant_max))
|
| 1004 |
+
|
| 1005 |
+
ctx.save_for_backward(mask)
|
| 1006 |
+
return out
|
| 1007 |
+
|
| 1008 |
+
@staticmethod
|
| 1009 |
+
def backward(ctx, gy):
|
| 1010 |
+
mask, = ctx.saved_tensors
|
| 1011 |
+
return gy * mask, None, None, None, None, None
|
| 1012 |
+
|
| 1013 |
+
@impl(quantized_decomposed_lib, "fake_quant_per_channel", "Autograd")
|
| 1014 |
+
def fake_quant_per_channel(
|
| 1015 |
+
input: torch.Tensor,
|
| 1016 |
+
scales: torch.Tensor,
|
| 1017 |
+
zero_points: torch.Tensor,
|
| 1018 |
+
axis: int,
|
| 1019 |
+
quant_min: int,
|
| 1020 |
+
quant_max: int,
|
| 1021 |
+
) -> torch.Tensor:
|
| 1022 |
+
return FakeQuantPerChannel.apply(input, scales, zero_points, axis, quant_min, quant_max)
|
| 1023 |
+
|
| 1024 |
+
@impl(quantized_decomposed_lib, "fake_quant_per_channel", "Meta")
|
| 1025 |
+
def fake_quant_per_channel_meta(
|
| 1026 |
+
input: torch.Tensor,
|
| 1027 |
+
scales: torch.Tensor,
|
| 1028 |
+
zero_points: torch.Tensor,
|
| 1029 |
+
axis: int,
|
| 1030 |
+
quant_min: int,
|
| 1031 |
+
quant_max: int,
|
| 1032 |
+
) -> torch.Tensor:
|
| 1033 |
+
return torch.empty_like(input)
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py
ADDED
|
@@ -0,0 +1,1177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
from torch.fx import map_arg, Node
|
| 4 |
+
from torch.fx.graph import Graph
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
import torch.ao.nn.intrinsic as nni
|
| 8 |
+
import torch.ao.nn.intrinsic.quantized as nniq
|
| 9 |
+
import torch.ao.nn.intrinsic.quantized.dynamic as nniqd
|
| 10 |
+
import torch.ao.nn.quantized as nnq
|
| 11 |
+
import torch.ao.nn.quantized.dynamic as nnqd
|
| 12 |
+
import torch.ao.nn.quantized.reference as nnqr
|
| 13 |
+
from torch.ao.nn.quantized.modules.utils import WeightedQuantizedModule
|
| 14 |
+
from torch.fx import GraphModule
|
| 15 |
+
from .utils import (
|
| 16 |
+
collect_producer_nodes,
|
| 17 |
+
get_linear_prepack_op_for_dtype,
|
| 18 |
+
get_new_attr_name_with_prefix,
|
| 19 |
+
get_qconv_prepack_op,
|
| 20 |
+
graph_module_from_producer_nodes,
|
| 21 |
+
)
|
| 22 |
+
from ..utils import _parent_name
|
| 23 |
+
from ..qconfig import QConfigAny
|
| 24 |
+
from ..quantization_mappings import get_quantized_operator
|
| 25 |
+
from .utils import create_node_from_old_node_preserve_meta
|
| 26 |
+
from typing import Dict, Tuple, Type, List, Callable, Any, Union, Set, Optional
|
| 27 |
+
import operator
|
| 28 |
+
|
| 29 |
+
QOP_TO_ARG_NAMES_TO_SKIP = {
|
| 30 |
+
torch._ops.ops.quantized.hardswish: ['inplace'],
|
| 31 |
+
torch._ops.ops.quantized.elu: ['inplace'],
|
| 32 |
+
torch._ops.ops.quantized.dropout: ['inplace'],
|
| 33 |
+
torch._ops.ops.quantized.instance_norm:
|
| 34 |
+
['running_mean', 'running_var', 'use_input_stats', 'momentum'],
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
def _is_node_in_list(node, modules, func_list, method_list, module_type_list):
|
| 38 |
+
is_call_function = node.op == "call_function" and node.target in func_list
|
| 39 |
+
is_call_method = node.op == "call_method" and node.target in method_list
|
| 40 |
+
is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
|
| 41 |
+
return is_call_function, is_call_method, is_call_module
|
| 42 |
+
|
| 43 |
+
def is_fixed_qparams_node(node, modules):
|
| 44 |
+
func_list = [
|
| 45 |
+
torch.nn.functional.hardsigmoid,
|
| 46 |
+
torch.nn.functional.sigmoid,
|
| 47 |
+
torch.sigmoid,
|
| 48 |
+
torch.tanh,
|
| 49 |
+
]
|
| 50 |
+
method_list = [
|
| 51 |
+
"hardsigmoid",
|
| 52 |
+
"hardsigmoid_",
|
| 53 |
+
"sigmoid",
|
| 54 |
+
"sigmoid_",
|
| 55 |
+
"tanh",
|
| 56 |
+
"tanh_",
|
| 57 |
+
]
|
| 58 |
+
module_type_list = [
|
| 59 |
+
torch.nn.Hardsigmoid,
|
| 60 |
+
torch.nn.Sigmoid,
|
| 61 |
+
torch.nn.Tanh,
|
| 62 |
+
torch.nn.Softmax,
|
| 63 |
+
]
|
| 64 |
+
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
|
| 65 |
+
|
| 66 |
+
def is_default_node(node, modules):
|
| 67 |
+
func_list = [
|
| 68 |
+
torch.nn.functional.elu,
|
| 69 |
+
torch.nn.functional.hardswish,
|
| 70 |
+
torch.nn.functional.instance_norm,
|
| 71 |
+
torch.nn.functional.layer_norm,
|
| 72 |
+
torch.nn.functional.leaky_relu,
|
| 73 |
+
torch.nn.functional.dropout,
|
| 74 |
+
]
|
| 75 |
+
method_list: List[Any] = []
|
| 76 |
+
module_type_list = [
|
| 77 |
+
nnqr.ConvTranspose1d,
|
| 78 |
+
nnqr.ConvTranspose2d,
|
| 79 |
+
nnqr.ConvTranspose3d,
|
| 80 |
+
torch.nn.ELU,
|
| 81 |
+
torch.nn.LeakyReLU,
|
| 82 |
+
torch.nn.Hardswish,
|
| 83 |
+
torch.nn.InstanceNorm1d,
|
| 84 |
+
torch.nn.InstanceNorm2d,
|
| 85 |
+
torch.nn.InstanceNorm3d,
|
| 86 |
+
torch.nn.LayerNorm,
|
| 87 |
+
torch.nn.Dropout,
|
| 88 |
+
torch.nn.PReLU,
|
| 89 |
+
torch.nn.BatchNorm2d,
|
| 90 |
+
torch.nn.BatchNorm3d,
|
| 91 |
+
torch.ao.nn.intrinsic.BNReLU2d,
|
| 92 |
+
torch.ao.nn.intrinsic.BNReLU3d,
|
| 93 |
+
]
|
| 94 |
+
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
|
| 95 |
+
|
| 96 |
+
def is_copy_node(node, modules):
|
| 97 |
+
func_list = [
|
| 98 |
+
torch.adaptive_avg_pool1d,
|
| 99 |
+
torch.nn.functional.adaptive_avg_pool2d,
|
| 100 |
+
torch.nn.functional.adaptive_avg_pool3d,
|
| 101 |
+
torch.nn.functional.hardtanh,
|
| 102 |
+
torch.nn.functional.hardtanh_,
|
| 103 |
+
torch.nn.functional.interpolate,
|
| 104 |
+
torch.nn.functional.max_pool1d,
|
| 105 |
+
torch.nn.functional.max_pool2d,
|
| 106 |
+
torch.nn.functional.max_pool3d,
|
| 107 |
+
torch.nn.functional.relu,
|
| 108 |
+
torch.nn.functional.relu6,
|
| 109 |
+
torch.avg_pool1d,
|
| 110 |
+
torch._C._nn.avg_pool2d,
|
| 111 |
+
torch._C._nn.avg_pool3d,
|
| 112 |
+
torch.clamp,
|
| 113 |
+
torch.flatten,
|
| 114 |
+
torch.mean,
|
| 115 |
+
operator.floordiv,
|
| 116 |
+
# F.channel_shuffle and torch.channel_shuffle are essentially the same thing
|
| 117 |
+
# so we only need to put one of them here
|
| 118 |
+
torch.channel_shuffle,
|
| 119 |
+
]
|
| 120 |
+
method_list = [
|
| 121 |
+
"clamp",
|
| 122 |
+
"mean",
|
| 123 |
+
"relu",
|
| 124 |
+
"relu_",
|
| 125 |
+
]
|
| 126 |
+
module_type_list = [
|
| 127 |
+
torch.nn.AdaptiveAvgPool1d,
|
| 128 |
+
torch.nn.AdaptiveAvgPool2d,
|
| 129 |
+
torch.nn.AdaptiveAvgPool3d,
|
| 130 |
+
torch.nn.AvgPool1d,
|
| 131 |
+
torch.nn.AvgPool2d,
|
| 132 |
+
torch.nn.AvgPool3d,
|
| 133 |
+
torch.nn.Hardtanh,
|
| 134 |
+
torch.nn.MaxPool1d,
|
| 135 |
+
torch.nn.MaxPool2d,
|
| 136 |
+
torch.nn.MaxPool3d,
|
| 137 |
+
torch.nn.ReLU,
|
| 138 |
+
torch.nn.ReLU6,
|
| 139 |
+
torch.nn.ChannelShuffle,
|
| 140 |
+
]
|
| 141 |
+
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
|
| 142 |
+
|
| 143 |
+
def is_general_tensor_shape_node(node, modules):
|
| 144 |
+
func_list = [
|
| 145 |
+
torch.narrow,
|
| 146 |
+
torch.transpose,
|
| 147 |
+
torch.repeat_interleave,
|
| 148 |
+
torch.squeeze,
|
| 149 |
+
torch.stack,
|
| 150 |
+
torch.unsqueeze,
|
| 151 |
+
torch.nn.functional.pixel_shuffle,
|
| 152 |
+
torch.nn.functional.pixel_unshuffle,
|
| 153 |
+
]
|
| 154 |
+
method_list = [
|
| 155 |
+
"contiguous",
|
| 156 |
+
"detach",
|
| 157 |
+
"detach_",
|
| 158 |
+
"permute",
|
| 159 |
+
"repeat",
|
| 160 |
+
"repeat_interleave",
|
| 161 |
+
"reshape",
|
| 162 |
+
"resize_",
|
| 163 |
+
"shape",
|
| 164 |
+
"size",
|
| 165 |
+
"squeeze",
|
| 166 |
+
"squeeze_",
|
| 167 |
+
"transpose",
|
| 168 |
+
"unsqueeze",
|
| 169 |
+
"unsqueeze_",
|
| 170 |
+
"view",
|
| 171 |
+
]
|
| 172 |
+
module_type_list = [
|
| 173 |
+
torch.nn.Identity,
|
| 174 |
+
torch.nn.PixelShuffle,
|
| 175 |
+
torch.nn.PixelUnshuffle,
|
| 176 |
+
]
|
| 177 |
+
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
|
| 178 |
+
|
| 179 |
+
def is_other_node(node, modules):
|
| 180 |
+
func_list = [
|
| 181 |
+
torch.cat,
|
| 182 |
+
]
|
| 183 |
+
method_list: List[Any] = []
|
| 184 |
+
module_type_list: List[Any] = []
|
| 185 |
+
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
|
| 186 |
+
|
| 187 |
+
def is_special_pattern_node(node, modules):
|
| 188 |
+
res_function, res_method, res_module = False, False, False
|
| 189 |
+
for checker in [is_fixed_qparams_node, is_default_node, is_copy_node, is_general_tensor_shape_node, is_other_node]:
|
| 190 |
+
is_call_function, is_call_method, is_call_module = checker(node, modules)
|
| 191 |
+
res_function = res_function or is_call_function
|
| 192 |
+
res_method = res_method or is_call_method
|
| 193 |
+
res_module = res_module or is_call_module
|
| 194 |
+
return res_function, res_method, res_module
|
| 195 |
+
|
| 196 |
+
def is_dequantize_node(node):
|
| 197 |
+
return isinstance(node, Node) and node.op == "call_method" and node.target == "dequantize"
|
| 198 |
+
|
| 199 |
+
def is_getattr_tensor_metadata_node(node):
|
| 200 |
+
return node.op == "call_function" and \
|
| 201 |
+
node.target == getattr and \
|
| 202 |
+
node.args[1] in ["shape"]
|
| 203 |
+
|
| 204 |
+
def is_get_tensor_info_node(node):
|
| 205 |
+
return node.op == "call_method" and \
|
| 206 |
+
node.target in ["shape", "size"]
|
| 207 |
+
|
| 208 |
+
def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: Dict[str, QConfigAny]):
|
| 209 |
+
"""
|
| 210 |
+
Return True if the op is configured with a None qconfig, False otherwise.
|
| 211 |
+
Note: maybe need to generalize this to also check for the dtype, and we
|
| 212 |
+
only lower when dtype matches, but right now fbgemm/qnnpack only support
|
| 213 |
+
a single dtype, so it is OK for now.
|
| 214 |
+
"""
|
| 215 |
+
return op.name in qconfig_map and qconfig_map[op.name] is None
|
| 216 |
+
|
| 217 |
+
# Mapping from reference module class to the replacement static quantized module class for lowering
|
| 218 |
+
STATIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[WeightedQuantizedModule]] = {
|
| 219 |
+
nnqr.Linear: nnq.Linear,
|
| 220 |
+
nnqr.Conv1d: nnq.Conv1d,
|
| 221 |
+
nnqr.Conv2d: nnq.Conv2d,
|
| 222 |
+
nnqr.Conv3d: nnq.Conv3d,
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
# Mapping from reference module class to the replacement dynamic quantized module class for lowering
|
| 226 |
+
DYNAMIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
|
| 227 |
+
nnqr.Linear: nnqd.Linear,
|
| 228 |
+
nnqr.GRUCell: nnqd.GRUCell,
|
| 229 |
+
nnqr.LSTMCell: nnqd.LSTMCell,
|
| 230 |
+
nnqr.RNNCell: nnqd.RNNCell,
|
| 231 |
+
nnqr.LSTM: nnqd.LSTM,
|
| 232 |
+
nnqr.GRU: nnqd.GRU,
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
# Mapping from reference module class to the replacement weight only quantized module class for lowering
|
| 236 |
+
# TODO: correct the namespace for these modules
|
| 237 |
+
WEIGHT_ONLY_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
|
| 238 |
+
nnqr.Embedding: nnq.Embedding,
|
| 239 |
+
nnqr.EmbeddingBag: nnq.EmbeddingBag,
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
# TODO: merge with STATIC_LOWER_MODULE_MAP after we merge
|
| 243 |
+
# _lower_static_weighted_ref_module and special_pattern_replacement
|
| 244 |
+
SPECIAL_PATTERN_LOWER_MODULE_MAP = {
|
| 245 |
+
nn.BatchNorm2d: nnq.BatchNorm2d,
|
| 246 |
+
nn.BatchNorm3d: nnq.BatchNorm3d,
|
| 247 |
+
nnqr.ConvTranspose1d: nnq.ConvTranspose1d,
|
| 248 |
+
nnqr.ConvTranspose2d: nnq.ConvTranspose2d,
|
| 249 |
+
nnqr.ConvTranspose3d: nnq.ConvTranspose3d,
|
| 250 |
+
nn.ELU: nnq.ELU,
|
| 251 |
+
nn.LeakyReLU: nnq.LeakyReLU,
|
| 252 |
+
nn.Hardswish: nnq.Hardswish,
|
| 253 |
+
nn.InstanceNorm1d: nnq.InstanceNorm1d,
|
| 254 |
+
nn.InstanceNorm2d: nnq.InstanceNorm2d,
|
| 255 |
+
nn.InstanceNorm3d: nnq.InstanceNorm3d,
|
| 256 |
+
nn.LayerNorm: nnq.LayerNorm,
|
| 257 |
+
nn.Dropout: nnq.Dropout,
|
| 258 |
+
nn.Softmax: nnq.Softmax,
|
| 259 |
+
nn.PReLU: nnq.PReLU,
|
| 260 |
+
nni.BNReLU2d: nniq.BNReLU2d,
|
| 261 |
+
nni.BNReLU3d: nniq.BNReLU3d,
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
# Mapping from fused module class to a 2-tuple of:
|
| 265 |
+
# 1) The inner reference module class
|
| 266 |
+
# 2) The replacement static quantized module class for lowering
|
| 267 |
+
STATIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
|
| 268 |
+
nni.LinearReLU: (nnqr.Linear, nniq.LinearReLU),
|
| 269 |
+
# TODO: LinearLeakyReLU is registered as global but it is only fused and
|
| 270 |
+
# lowered when ondnn's backend config is used. Maybe need to separate
|
| 271 |
+
# registration and lowering functions for different backends in the future.
|
| 272 |
+
nni.LinearLeakyReLU: (nnqr.Linear, nniq.LinearLeakyReLU),
|
| 273 |
+
nni.LinearTanh: (nnqr.Linear, nniq.LinearTanh),
|
| 274 |
+
nni.ConvReLU1d: (nnqr.Conv1d, nniq.ConvReLU1d),
|
| 275 |
+
nni.ConvReLU2d: (nnqr.Conv2d, nniq.ConvReLU2d),
|
| 276 |
+
nni.ConvReLU3d: (nnqr.Conv3d, nniq.ConvReLU3d),
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
# The difference between STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP and STATIC_LOWER_FUSED_MODULE_MAP:
|
| 280 |
+
# The refer node inside STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP has 2 inputs.
|
| 281 |
+
# Mapping from fused module class to a 2-tuple of:
|
| 282 |
+
# 1) The inner reference module class
|
| 283 |
+
# 2) The replacement static quantized module class for lowering
|
| 284 |
+
STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
|
| 285 |
+
nni.ConvAdd2d: (nnqr.Conv2d, nniq.ConvAdd2d),
|
| 286 |
+
nni.ConvAddReLU2d: (nnqr.Conv2d, nniq.ConvAddReLU2d),
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
# Mapping from fused module class to a 2-tuple of:
|
| 290 |
+
# 1) The inner reference module class
|
| 291 |
+
# 2) The replacement dynamic quantized module class for lowering
|
| 292 |
+
DYNAMIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[nn.Module]]] = {
|
| 293 |
+
nni.LinearReLU: (nnqr.Linear, nniqd.LinearReLU),
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
# Mapping from a functional to lower to a 2-tuple of
|
| 297 |
+
# 1) The quantized version of the op
|
| 298 |
+
# 2) The quantized version of the op fused with relu, if it exists, else None
|
| 299 |
+
STATIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Tuple[Callable, Optional[Callable]]] = {
|
| 300 |
+
F.linear: (torch.ops.quantized.linear, torch.ops.quantized.linear_relu),
|
| 301 |
+
F.conv1d: (torch.ops.quantized.conv1d, torch.ops.quantized.conv1d_relu),
|
| 302 |
+
F.conv2d: (torch.ops.quantized.conv2d, torch.ops.quantized.conv2d_relu),
|
| 303 |
+
F.conv3d: (torch.ops.quantized.conv3d, torch.ops.quantized.conv3d_relu),
|
| 304 |
+
F.conv_transpose1d: (torch.ops.quantized.conv_transpose1d, None),
|
| 305 |
+
F.conv_transpose2d: (torch.ops.quantized.conv_transpose2d, None),
|
| 306 |
+
F.conv_transpose3d: (torch.ops.quantized.conv_transpose3d, None),
|
| 307 |
+
}
|
| 308 |
+
|
| 309 |
+
WEIGHT_PREPACK_OPS: Set[Callable] = {
|
| 310 |
+
torch._ops.ops.quantized.linear_prepack,
|
| 311 |
+
torch._ops.ops.quantized.linear_prepack_fp16,
|
| 312 |
+
torch._ops.ops.quantized.conv1d_prepack,
|
| 313 |
+
torch._ops.ops.quantized.conv2d_prepack,
|
| 314 |
+
torch._ops.ops.quantized.conv3d_prepack,
|
| 315 |
+
torch.ops.quantized.conv_transpose1d_prepack,
|
| 316 |
+
torch.ops.quantized.conv_transpose2d_prepack,
|
| 317 |
+
torch.ops.quantized.conv_transpose3d_prepack,
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
# Mapping from a functional to a dictionary, where the key is a 2-tuple of
|
| 321 |
+
# (input_activation_dtype, weight_dtype) and the value is a 2-tuple of
|
| 322 |
+
# 1) The dynamically quantized version of the op
|
| 323 |
+
# 2) The dynamically quantized version of the op fused with relu, if it exists, else None
|
| 324 |
+
DYNAMIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Dict[Tuple[torch.dtype, torch.dtype], Tuple[Callable, Optional[Callable]]]] = {
|
| 325 |
+
F.linear: {
|
| 326 |
+
(torch.quint8, torch.qint8): (torch.ops.quantized.linear_dynamic,
|
| 327 |
+
torch.ops.quantized.linear_relu_dynamic),
|
| 328 |
+
(torch.float16, torch.float16): (torch.ops.quantized.linear_dynamic_fp16,
|
| 329 |
+
torch.ops.quantized.linear_relu_dynamic_fp16)
|
| 330 |
+
},
|
| 331 |
+
# dynamic conv + relu is not available yet
|
| 332 |
+
F.conv1d: {
|
| 333 |
+
(torch.quint8, torch.qint8): (torch.ops.quantized.conv1d_dynamic, None),
|
| 334 |
+
},
|
| 335 |
+
F.conv2d: {
|
| 336 |
+
(torch.quint8, torch.qint8): (torch.ops.quantized.conv2d_dynamic, None),
|
| 337 |
+
},
|
| 338 |
+
F.conv3d: {
|
| 339 |
+
(torch.quint8, torch.qint8): (torch.ops.quantized.conv3d_dynamic, None),
|
| 340 |
+
},
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
CONV_FUNCTIONAL_OPS: Set[Callable] = {
|
| 344 |
+
F.conv1d,
|
| 345 |
+
F.conv2d,
|
| 346 |
+
F.conv3d,
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
CONV_TRANSPOSE_FUNCTIONAL_OPS: Set[Callable] = {
|
| 350 |
+
F.conv_transpose1d,
|
| 351 |
+
F.conv_transpose2d,
|
| 352 |
+
F.conv_transpose3d,
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
# TODO: add tests for lowering these ops
|
| 356 |
+
QBIN_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
|
| 357 |
+
operator.add: torch.ops.quantized.add,
|
| 358 |
+
torch.add: torch.ops.quantized.add,
|
| 359 |
+
operator.mul: torch.ops.quantized.mul,
|
| 360 |
+
operator.matmul: torch.ops.quantized.matmul,
|
| 361 |
+
torch.mul: torch.ops.quantized.mul,
|
| 362 |
+
torch.matmul: torch.ops.quantized.matmul,
|
| 363 |
+
}
|
| 364 |
+
QBIN_RELU_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
|
| 365 |
+
operator.add: torch.ops.quantized.add_relu,
|
| 366 |
+
torch.add: torch.ops.quantized.add_relu,
|
| 367 |
+
operator.mul: torch.ops.quantized.mul_relu,
|
| 368 |
+
torch.mul: torch.ops.quantized.mul_relu,
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
def _save_packed_weight(self, destination, prefix, keep_vars):
|
| 372 |
+
for attr_name in dir(self):
|
| 373 |
+
if "_packed_weight" in attr_name and \
|
| 374 |
+
isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined]
|
| 375 |
+
packed_weight = getattr(self, attr_name)
|
| 376 |
+
destination[prefix + attr_name] = packed_weight
|
| 377 |
+
|
| 378 |
+
def _load_packed_weight(self, state_dict, prefix, local_metadata, strict,
|
| 379 |
+
missing_keys, unexpected_keys, error_msgs):
|
| 380 |
+
attrs_to_pop = []
|
| 381 |
+
for attr_name in state_dict:
|
| 382 |
+
if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950
|
| 383 |
+
setattr(self, attr_name, state_dict[attr_name])
|
| 384 |
+
attrs_to_pop.append(attr_name)
|
| 385 |
+
|
| 386 |
+
# pop the packed param attributesn
|
| 387 |
+
for attr_name in attrs_to_pop:
|
| 388 |
+
state_dict.pop(attr_name)
|
| 389 |
+
|
| 390 |
+
def fold_weight(
    quantized_model: GraphModule,
    node_name_to_scope: Dict[str, Tuple[str, type]]
) -> GraphModule:
    """
    Trace back from the weight node until we hit getattr, reconstruct the
    graph module with the traced nodes and run the graph module to pack the
    weight. then replace the original chain of ops with the packed weight.

    Returns a new GraphModule in which each weight-prepack call and its
    producer chain is replaced by a single get_attr on the packed weight.
    """
    # map from prepack node name to the materialized packed weight object
    packed_weights = {}
    # map from folded node name to the prepack node that consumes it
    folded_nodes = {}
    # get packed weights: run the producer subgraph of each prepack op
    # eagerly to obtain the actual packed weight value
    for node in quantized_model.graph.nodes:
        if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS:
            nodes_to_fold = collect_producer_nodes(node)
            if nodes_to_fold is not None:
                for node_to_fold in nodes_to_fold:
                    folded_nodes[node_to_fold.name] = node

                prepacking_module = graph_module_from_producer_nodes(
                    quantized_model, nodes_to_fold)
                # executing the extracted subgraph yields the packed weight
                packed_weight = prepacking_module()
                packed_weights[node.name] = packed_weight

    # remove folded nodes and replace the prepacking node with getattr
    folded_graph = Graph()
    env: Dict[Any, Any] = {}

    def load_arg(a):
        # remap args of copied nodes to their counterparts in folded_graph
        return map_arg(a, lambda node: env[node.name])

    for node in quantized_model.graph.nodes:
        prepack_node = folded_nodes.get(node.name, None)
        if prepack_node is node:
            packed_weight = packed_weights[node.name]
            # add a prepacked attribute to root; the attribute name is
            # derived from the scope of the op that consumes the weight
            op_node = next(iter(prepack_node.users))
            module_path, _ = node_name_to_scope[op_node.name]
            get_new_packed_weight_name = \
                get_new_attr_name_with_prefix(module_path + '_packed_weight_')
            packed_weight_name = get_new_packed_weight_name(quantized_model)
            setattr(quantized_model, packed_weight_name, packed_weight)
            # replace prepack node with a getattr node
            env[node.name] = folded_graph.create_node(
                'get_attr', packed_weight_name, (), {})
        elif prepack_node is not None:
            # this node was folded into a prepack chain: drop it
            continue
        else:
            # copy other nodes unchanged
            env[node.name] = folded_graph.node_copy(node, load_arg)

    quantized_model = GraphModule(quantized_model, folded_graph)
    # hooks so the packed weights round-trip through state_dict save/load
    quantized_model._register_state_dict_hook(_save_packed_weight)
    quantized_model._register_load_state_dict_pre_hook(_load_packed_weight, with_module=True)
    return quantized_model
|
| 447 |
+
|
| 448 |
+
def _get_module(node: Node, modules: Dict[str, nn.Module]) -> Optional[nn.Module]:
|
| 449 |
+
"""
|
| 450 |
+
Return the `torch.nn.Module` that corresponds to the specified node's target.
|
| 451 |
+
If no such node exists, return None.
|
| 452 |
+
"""
|
| 453 |
+
if node.op == "call_module" and str(node.target) in modules:
|
| 454 |
+
return modules[str(node.target)]
|
| 455 |
+
else:
|
| 456 |
+
return None
|
| 457 |
+
|
| 458 |
+
def _match_static_pattern(
    node: Node,
    modules: Dict[str, nn.Module],
    qconfig_map: Dict[str, QConfigAny],
    matching_modules_or_ops: List[Callable],
    dequantize_node_arg_indices: List[int]
) -> Union[Tuple[Node, Node, Node], Tuple[None, None, None]]:
    """
    Match the pattern (dequantize - ref node - quantize) against the node provided.

    If there is a match, return a 3-tuple of:
      1) q_node: the quantize node,
      2) relu_node: a relu node wrapping the ref_node, and
      3) ref_node: a reference module or functional node to replace with its quantized counterpart
    Otherwise, if there is no match, return a 3-tuple of (None, None, None).

    Parameters:
      node: The `torch.fx.Node` to match against.
      modules: A mapping from node names to modules in the model graph, used for module lookup.
      qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
          If the corresponding qconfig for the reference node is None, then return no match.
      matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
          If the reference node is not in this list, then return no match.
      dequantize_node_arg_indices: A list of indices in the reference node args where dequantize
          nodes may be present. An empty list means skipping the check for dequantize nodes.
    """
    SKIP_LOWERING_VALUE = (None, None, None)

    # Match quantize node: the pattern is matched backwards starting from
    # the final quantize_per_tensor call
    if node.op != "call_function" or node.target != torch.quantize_per_tensor:
        return SKIP_LOWERING_VALUE
    q_node = node
    ref_node = q_node.args[0]
    assert isinstance(ref_node, Node)

    # Handle cases where the node is wrapped in a ReLU
    if (ref_node.op == "call_function" and ref_node.target in (F.relu, torch.relu)) or\
            (ref_node.op == "call_module" and type(_get_module(ref_node, modules)) == nn.ReLU):
        relu_node = ref_node
        ref_node = relu_node.args[0]
        assert isinstance(ref_node, Node)
    else:
        relu_node = None
    if should_skip_lowering(ref_node, qconfig_map):
        return SKIP_LOWERING_VALUE

    # Match reference module or functional: the first element of the list
    # decides whether we match by module type or by call_function target
    if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):
        expected_op = "call_module"
        match_key = type(_get_module(ref_node, modules))
    else:
        expected_op = "call_function"
        match_key = ref_node.target
    if ref_node.op != expected_op or match_key not in matching_modules_or_ops:
        return SKIP_LOWERING_VALUE

    # Match dequantize node(s). Both of the following conditions must pass:
    # (1) All `torch.fx.Node`s at the matching indices must be a dequantize node
    # (2) There must be at least one dequantize node
    matched_dequantize = False
    for i in dequantize_node_arg_indices:
        assert i < len(ref_node.args), \
            f"Dequantize index {i} exceeded reference node's arg length {len(ref_node.args)}"
        arg = ref_node.args[i]
        if is_dequantize_node(arg):
            matched_dequantize = True
        elif isinstance(arg, Node):
            # a Node arg at a dequantize index that is not a dequantize
            # call breaks the pattern; non-Node args (constants) are allowed
            return SKIP_LOWERING_VALUE
    if not matched_dequantize:
        return SKIP_LOWERING_VALUE

    return (q_node, relu_node, ref_node)
|
| 530 |
+
|
| 531 |
+
def _match_static_pattern_with_two_inputs(
    node: Node,
    modules: Dict[str, nn.Module],
    qconfig_map: Dict[str, QConfigAny],
    matching_modules_or_ops: List[Callable]
) -> Union[Tuple[Node, Node], Tuple[None, None]]:
    """
    Match the following pattern against the node provided:

        (dequantize, dequantize) - ref node - quantize

    i.e. a reference node taking two dequantized inputs, followed by a quantize.

    If there is a match, return a 2-tuple of:
      1) q_node: the quantize node,
      2) ref_node: a reference module or functional node to replace with its quantized counterpart
    Otherwise, if there is no match, return a 2-tuple of (None, None).

    Parameters:
      node: The `torch.fx.Node` to match against.
      modules: A mapping from node names to modules in the model graph, used for module lookup.
      qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
          If the corresponding qconfig for the reference node is None, then return no match.
      matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
          If the reference node is not in this list, then return no match.
    """
    SKIP_LOWERING_VALUE = (None, None)

    # Match quantize node
    if node.op != "call_function" or node.target != torch.quantize_per_tensor:
        return SKIP_LOWERING_VALUE
    q_node = node
    ref_node = q_node.args[0]
    assert isinstance(ref_node, Node)

    if should_skip_lowering(ref_node, qconfig_map):
        return SKIP_LOWERING_VALUE

    # Match reference module or functional
    if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):
        expected_op = "call_module"
        match_key = type(_get_module(ref_node, modules))
    else:
        # This pass only support op of "call_module"
        return SKIP_LOWERING_VALUE

    if ref_node.op != expected_op or match_key not in matching_modules_or_ops:
        return SKIP_LOWERING_VALUE

    # Check ref_node has 2 input nodes, both are dq node.
    if len(ref_node.args) != 2:
        return SKIP_LOWERING_VALUE
    for i in range(len(ref_node.args)):
        arg = ref_node.args[i]
        if not is_dequantize_node(arg):
            return SKIP_LOWERING_VALUE

    return (q_node, ref_node)
|
| 586 |
+
|
| 587 |
+
def _lower_static_weighted_ref_module(
        model: GraphModule,
        qconfig_map: Dict[str, QConfigAny]):
    """
    Traverse the graph and find dequantize - ref module - quantize patterns
    and replace them with the quantized version of the ref module.

    Mutates `model` in place: swaps matched reference modules for their
    quantized counterparts and rewires the graph around the removed
    dequantize/quantize nodes.
    """
    modules = dict(model.named_modules(remove_duplicate=False))
    # NOTE: the previous version also built an unused `nodes = list(...)`
    # snapshot; it was never read, so it has been removed.
    for n in model.graph.nodes:
        # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize)
        matching_modules = list(STATIC_LOWER_MODULE_MAP.keys()) + list(STATIC_LOWER_FUSED_MODULE_MAP.keys())
        (q_node, relu_node, ref_node) = _match_static_pattern(
            n, modules, qconfig_map, matching_modules, dequantize_node_arg_indices=[0])  # type: ignore[arg-type]
        if q_node is None:
            continue
        assert ref_node is not None
        (_, scale_node, zero_point_node, _) = q_node.args
        ref_module = _get_module(ref_node, modules)
        ref_class = type(ref_module)
        assert isinstance(scale_node, Node)
        assert isinstance(zero_point_node, Node)
        assert issubclass(ref_class, nn.Module)

        # Step 1: Change this pattern to use the corresponding quantized module
        # For fused modules, we also check whether the inner module is a reference module
        # If so, we replace the entire fused module with the corresponding quantized module
        if ref_class in STATIC_LOWER_FUSED_MODULE_MAP:
            inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_MAP[ref_class]
            if type(ref_module[0]) != inner_ref_class:  # type: ignore[index]
                continue
        else:
            q_class = STATIC_LOWER_MODULE_MAP[ref_class]
        # output scale/zero_point are stored as attributes on the model,
        # referenced via get_attr nodes
        output_scale = getattr(model, scale_node.target)
        output_zero_point = getattr(model, zero_point_node.target)
        q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
        # replace reference module with quantized module
        parent_name, module_name = _parent_name(ref_node.target)
        setattr(modules[parent_name], module_name, q_module)

        # Step 2: Reroute around dq_node, and remove q_node and its args
        assert len(ref_node.args) == 1
        dq_node = ref_node.args[0]
        assert isinstance(dq_node, Node)
        ref_node.replace_input_with(dq_node, dq_node.args[0])
        q_node.replace_all_uses_with(ref_node)
        model.graph.erase_node(q_node)
        model.graph.erase_node(scale_node)
        model.graph.erase_node(zero_point_node)
|
| 636 |
+
|
| 637 |
+
def _lower_static_weighted_ref_module_with_two_inputs(
        model: GraphModule,
        qconfig_map: Dict[str, QConfigAny]):
    """
    Traverse the graph and find patterns

        dequantize   dequantize
            \\          //
             ref module
                 \\
               quantize

    and replace them with the quantized version of the ref module.

    Mutates `model` in place. Only fused reference modules listed in
    STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP are lowered.
    """
    modules = dict(model.named_modules(remove_duplicate=False))
    # NOTE: the previous version also built an unused `nodes = list(...)`
    # snapshot; it was never read, so it has been removed.
    for n in model.graph.nodes:
        # Step 0: Find nodes that match the pattern
        # (dequantize, dequantize) - ref module - quantize
        matching_modules = list(STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP.keys())
        (q_node, ref_node) = _match_static_pattern_with_two_inputs(
            n, modules, qconfig_map, matching_modules)  # type: ignore[arg-type]
        if q_node is None:
            continue
        assert ref_node is not None
        (_, scale_node, zero_point_node, _) = q_node.args
        ref_module = _get_module(ref_node, modules)
        ref_class = type(ref_module)
        assert isinstance(scale_node, Node)
        assert isinstance(zero_point_node, Node)
        assert issubclass(ref_class, nn.Module)

        # Step 1: Change this pattern to use the corresponding quantized module
        # For fused modules, we also check whether the inner module is a reference module
        # If so, we replace the entire fused module with the corresponding quantized module
        if ref_class in STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP:
            inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP[ref_class]
            if type(ref_module[0]) != inner_ref_class:  # type: ignore[index]
                continue
        else:
            # only fused two-input modules are supported by this pass
            continue
        output_scale = getattr(model, scale_node.target)
        output_zero_point = getattr(model, zero_point_node.target)
        q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
        # replace reference module with quantized module
        parent_name, module_name = _parent_name(ref_node.target)
        setattr(modules[parent_name], module_name, q_module)

        # Step 2: Reroute around both dq nodes, and remove q_node and its args
        assert len(ref_node.args) == 2
        for arg in ref_node.args:
            if not is_dequantize_node(arg):
                continue
            dq_node = arg
            assert isinstance(dq_node, Node)
            ref_node.replace_input_with(dq_node, dq_node.args[0])

        q_node.replace_all_uses_with(ref_node)
        model.graph.erase_node(q_node)
        model.graph.erase_node(scale_node)
        model.graph.erase_node(zero_point_node)
|
| 696 |
+
|
| 697 |
+
def _lower_dynamic_weighted_ref_module(model: GraphModule):
    """
    Traverse the graph and find quantize_per_tensor_dynamic - dequantize - ref_module patterns
    and replace them with the dynamically quantized version of the ref module.

    Mutates `model` in place.
    """
    named_modules = dict(model.named_modules(remove_duplicate=False))
    for n in model.graph.nodes:
        # only consider call_module nodes whose module type is lowerable
        if n.op != "call_module" or \
           type(named_modules[str(n.target)]) not in \
           set(DYNAMIC_LOWER_MODULE_MAP.keys()).union(
               set(DYNAMIC_LOWER_FUSED_MODULE_MAP.keys())):
            continue
        ref_node = n
        dq_node = ref_node.args[0]
        if dq_node.op != "call_method" or dq_node.target != "dequantize":
            continue

        input_dynamic_q_node = dq_node.args[0]

        if input_dynamic_q_node.op != "call_function" or \
                input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
            continue

        # quantize_per_tensor_dynamic args: (input, dtype, reduce_range)
        activation_dtype = input_dynamic_q_node.args[1]
        is_fp16 = activation_dtype == torch.float16
        is_int8 = activation_dtype in [torch.quint8, torch.qint8]
        if not is_int8 and not is_fp16:
            continue

        ref_module = named_modules[str(ref_node.target)]
        ref_class = type(ref_module)
        if ref_class in DYNAMIC_LOWER_FUSED_MODULE_MAP:
            # for fused modules, the inner module must be the expected
            # reference class, otherwise skip
            inner_ref_class, q_class = DYNAMIC_LOWER_FUSED_MODULE_MAP[ref_class]
            if type(ref_module[0]) != inner_ref_class:
                continue
        else:
            q_class = DYNAMIC_LOWER_MODULE_MAP.get(ref_class)  # type: ignore[assignment]
        # TODO: maybe define a WeightedDynamicallyQuantizedModule
        q_module = q_class.from_reference(ref_module)  # type: ignore[attr-defined]

        # replace reference module with dynamically quantized module
        parent_name, module_name = _parent_name(ref_node.target)
        setattr(named_modules[parent_name], module_name, q_module)
        # feed the original fp input straight into the dynamic module,
        # bypassing the quantize_per_tensor_dynamic/dequantize pair
        ref_node.replace_input_with(dq_node, input_dynamic_q_node.args[0])
|
| 741 |
+
|
| 742 |
+
def _lower_weight_only_weighted_ref_module(model: GraphModule):
    """
    Traverse the graph and find ref_module patterns
    and replace them with the weight only quantized version of the ref module.

    Mutates `model` in place. No input/output quantize nodes are involved,
    only the module itself is swapped.
    """
    named_modules = dict(model.named_modules(remove_duplicate=False))
    for n in model.graph.nodes:
        if n.op != "call_module" or \
           type(named_modules[str(n.target)]) not in \
           set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()):
            continue
        ref_node = n
        ref_module = named_modules[str(ref_node.target)]
        ref_class = type(ref_module)
        q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class)
        # TODO: WeightedQuantizedModule is currently assuming static quant apis
        # with output_scale, output_zero_point in from_reference, we may want to
        # relax that, or rename this
        # TODO: maybe define a WeightedWeightOnlyQuantizedModule
        q_module = q_class.from_reference(ref_module)  # type: ignore[union-attr]

        # replace reference module with weight-only quantized module
        parent_name, module_name = _parent_name(ref_node.target)
        setattr(named_modules[parent_name], module_name, q_module)
|
| 766 |
+
|
| 767 |
+
def _lower_static_weighted_ref_functional(
        model: GraphModule,
        qconfig_map: Dict[str, QConfigAny]):
    """
    Traverse the graph and replace functional reference patterns with their quantized versions.

    Matches (dequantize - functional op [- relu] - quantize), replaces the
    quantized weight chain with a prepack op, and swaps the functional op
    for its quantized counterpart. Mutates `model` in place.
    """
    modules = dict(model.named_modules(remove_duplicate=False))
    # NOTE(review): `nodes` is never read below — candidate for removal
    nodes = list(model.graph.nodes)
    for n in model.graph.nodes:
        # Step 0: Find nodes that match this pattern (dequantize - functional op - quantize)
        matching_ops = list(STATIC_LOWER_FUNCTIONAL_MAP.keys())
        (q_node, relu_node, func_node) = _match_static_pattern(
            n, modules, qconfig_map, matching_ops, dequantize_node_arg_indices=[0, 1])
        if q_node is None:
            continue
        assert func_node is not None
        (_, output_scale_node, output_zp_node, _) = q_node.args
        (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
        assert isinstance(output_zp_node, Node)
        assert isinstance(input_dq_node, Node)
        assert isinstance(weight_dq_node, Node)
        quantized_weight = weight_dq_node.args[0]
        assert isinstance(quantized_weight, Node)
        # the weight must come from an explicit quantize call for prepacking
        if quantized_weight.op != "call_function" or\
                quantized_weight.target not in (torch.quantize_per_tensor, torch.quantize_per_channel):
            continue

        # Step 1: Replace quantized weights with packed weights, which will be folded later
        # Use the right prepack op and prepare the corresponding args
        # Linear prepack args: (quantized weights[, bias])
        # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
        prepack_args = [quantized_weight] + remaining_func_args
        if func_node.target == F.linear:
            # the last arg of the quantize call is the weight dtype
            weight_dtype = quantized_weight.args[-1]
            prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
        elif func_node.target in CONV_FUNCTIONAL_OPS:
            prepack_op = get_qconv_prepack_op(func_node.target)  # type: ignore[arg-type]
            # For conv1d, the stride, padding, and dilation args may be ints,
            # in which case we need to convert them to tuples
            if func_node.target == F.conv1d:
                for i in [2, 3, 4]:
                    if len(prepack_args) > i and isinstance(prepack_args[i], int):
                        prepack_args[i] = (prepack_args[i],)
        elif func_node.target in CONV_TRANSPOSE_FUNCTIONAL_OPS:
            prepack_op = get_qconv_prepack_op(func_node.target)  # type: ignore[arg-type]
            # For conv_transpose1d, the stride, padding, and dilation args may be ints,
            # in which case we need to convert them to tuples
            if func_node.target == F.conv_transpose1d:
                # Note prepack_args[5] is groups.
                for i in [2, 3, 4, 6]:
                    if len(prepack_args) > i and isinstance(prepack_args[i], int):
                        prepack_args[i] = (prepack_args[i],)
            # swap dilation and groups
            # prepack op has arguments: {w, b, stride, padding, output_padding, dilation, groups}
            # transposed conv op has arguments: {x, w, b, stride, padding, output_padding, groups, dilation}
            if (len(prepack_args) > 6):
                prepack_args[5], prepack_args[6] = prepack_args[6], prepack_args[5]
        else:
            raise ValueError(f"Lowering is not supported for op '{func_node.target}'")
        with model.graph.inserting_before(output_scale_node):
            # kwargs of the func node are needed for prepack op (i.e., quantized::linear_prepack)
            # They are not needed for compute op (i.e., quantized::linear)
            kwargs = func_node.kwargs
            # F.linear uses 'bias' key for bias while qlinear_prepack uses 'B' for bias
            if func_node.target == F.linear and 'bias' in kwargs:
                kwargs = kwargs.copy()
                kwargs['B'] = kwargs['bias']
                del kwargs['bias']
            packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), kwargs)

        # Step 2: Replace reference pattern with the corresponding quantized op
        (q_func, q_relu_func) = STATIC_LOWER_FUNCTIONAL_MAP[func_node.target]  # type: ignore[index]
        # conv_transpose does not support fusion with relu yet. q_relu_func is None in such cases
        if q_relu_func is not None:
            func_node.target = q_relu_func if relu_node is not None else q_func
        else:
            func_node.target = q_func
        func_node.args = (input_dq_node.args[0], packed_weight, output_scale_node, output_zp_node)
        # kwargs for func_node has been moved to kwargs for prepack op
        func_node.kwargs = {}
        q_node.replace_all_uses_with(func_node)
        # Move func_node after output_zp_node in the graph so it comes
        # after its scale/zero_point arguments
        output_zp_node.append(func_node)

        # Clean up: Remove quantize node, and the relu node if it exists
        model.graph.erase_node(q_node)
        if relu_node is not None and q_relu_func is not None:
            model.graph.erase_node(relu_node)
|
| 855 |
+
|
| 856 |
+
def _lower_dynamic_weighted_ref_functional(
        model: GraphModule,
        qconfig_map: Dict[str, QConfigAny]):
    """
    Traverse the graph and replace functional reference patterns with their dynamically
    quantized versions.
    Examples:
    quantize_per_tensor_dynamic - dequantize - functional linear --> linear_dynamic
    to(torch.float16) - dequantize - functional linear --> linear_dynamic_fp16

    Mutates `model` in place.
    """
    modules = dict(model.named_modules(remove_duplicate=False))
    # NOTE(review): `nodes` is never read below — candidate for removal
    nodes = list(model.graph.nodes)
    # we want to search in reserved order so that we can match the larger patterns first
    # e.g. we want to match linear - relu before linear.
    for n in reversed(model.graph.nodes):

        # Step 0: Find nodes that match this pattern
        # (quantize_per_tensor_dynamic - dequantize - dynamically quantized op)
        # We search for the pattern backwards, starting with the quantize node
        # Quantize node args: (func, scale, zp, dtype)
        func_node = n
        # Handle cases where the functional op is wrapped in a ReLU
        if func_node.op == "call_function" and func_node.target == F.relu or \
                func_node.op == "call_module" and \
                type(modules[str(func_node.target)]) == torch.nn.ReLU:
            relu_node = func_node
            func_node = relu_node.args[0]
        else:
            relu_node = None
        if should_skip_lowering(func_node, qconfig_map):
            continue
        # Linear args: (dequantized inputs, dequantized weights[, bias])
        # Conv args: (dequantized inputs, dequantized weights[, bias, stride, padding, dilation, groups])
        if func_node.op != "call_function" or func_node.target not in DYNAMIC_LOWER_FUNCTIONAL_MAP:
            continue
        (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
        if input_dq_node.op != "call_method" or input_dq_node.target != "dequantize" or \
                weight_dq_node.op != "call_method" or weight_dq_node.target != "dequantize":
            continue

        input_dynamic_q_node = input_dq_node.args[0]

        if input_dynamic_q_node.op != "call_function" or \
                input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
            continue

        # NOTE(review): this assignment is dead — it is immediately
        # overwritten by the tuple unpacking on the next line
        reduce_range_node = None
        (pattern_input, activation_dtype, reduce_range_node) = input_dynamic_q_node.args
        is_fp16 = activation_dtype == torch.float16
        is_int8 = activation_dtype in [torch.quint8, torch.qint8]
        if not is_int8 and not is_fp16:
            continue

        quantized_weight = weight_dq_node.args[0]
        # the last arg of the weight quantize call is its dtype
        weight_dtype = quantized_weight.args[-1]

        # Step 1: Try to select reference pattern with the corresponding quantized op
        dynamic_quant_dtype_key = (activation_dtype, weight_dtype)
        if dynamic_quant_dtype_key not in DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target]:
            print(f"Didn't find dtype combination {dynamic_quant_dtype_key} during "
                  f"dynamic quantized op lowering for {func_node.target}")
            continue
        (q_func, q_relu_func) = DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target][dynamic_quant_dtype_key]

        if q_func is None or q_relu_func is None:
            print("Didn't find corresponding quantized function or quantized relu function "
                  f"for {func_node.target}, {dynamic_quant_dtype_key}")
            continue

        # Step 2: Replace quantized weights with packed weights, which will be folded later
        # Use the right prepack op and prepare the corresponding args
        # Linear prepack args: (quantized weights[, bias])
        # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
        prepack_args = [quantized_weight] + remaining_func_args
        prepack_kwargs = {}
        if func_node.target == F.linear:
            prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
            # F.linear uses 'bias' key while qlinear_prepack uses 'B'
            kwargs = func_node.kwargs.copy()
            if 'bias' in kwargs:
                prepack_kwargs['B'] = kwargs['bias']
                del kwargs['bias']
                func_node.kwargs = kwargs
        elif func_node.target in CONV_FUNCTIONAL_OPS:
            prepack_op = get_qconv_prepack_op(func_node.target)
            # For conv1d, the stride, padding, and dilation args may be ints,
            # in which case we need to convert them to tuples
            if func_node.target == F.conv1d:
                for i in [2, 3, 4]:
                    if len(prepack_args) > i and isinstance(prepack_args[i], int):
                        prepack_args[i] = (prepack_args[i],)
        else:
            raise ValueError(f"Lowering is not supported for op '{func_node.target}'")
        with model.graph.inserting_before(func_node):
            packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), prepack_kwargs)

        # Step 3: Replace reference pattern with the corresponding quantized op
        func_node.target = q_relu_func if relu_node is not None else q_func
        if is_int8:
            func_node.args = (pattern_input, packed_weight, reduce_range_node)
        else:
            func_node.args = (pattern_input, packed_weight)

        if relu_node is not None:
            relu_node.replace_all_uses_with(func_node)

        # Step 4: Remove the relu node if it exists
        if relu_node is not None:
            model.graph.erase_node(relu_node)
|
| 964 |
+
|
| 965 |
+
def _lower_quantized_binary_op(
        model: GraphModule,
        qconfig_map: Dict[str, QConfigAny]):
    """
    Traverse the graph and replace (dequantize - binary op [- relu] - quantize)
    patterns with a single quantized binary op (optionally fused with relu).

    Mutates `model` in place.
    """
    binary_ops_to_lower: List[Callable] = [operator.add, torch.add, operator.mul, torch.mul, torch.matmul]
    modules = dict(model.named_modules(remove_duplicate=False))
    for n in model.graph.nodes:
        # Step 0: Find nodes that match this pattern (dequantize - binary op - quantize)
        (q_node, relu_node, bop_node) = _match_static_pattern(
            n, modules, qconfig_map, binary_ops_to_lower, dequantize_node_arg_indices=[0, 1])
        if q_node is None:
            continue
        assert bop_node is not None
        (_, scale_node, zero_point_node, _) = q_node.args

        # Step 1: Remove dequant nodes, rerouting the binary op's inputs
        # to the quantized tensors directly
        num_dq_nodes = 0
        for arg in bop_node.args:
            if not is_dequantize_node(arg):
                continue
            dq_node = arg
            assert isinstance(dq_node, Node)
            dn_input = dq_node.args[0]
            bop_node.replace_input_with(dq_node, dn_input)
            num_dq_nodes += 1
        assert num_dq_nodes > 0

        # Step 2: Swap binary op to quantized binary op
        assert bop_node.target in QBIN_OP_MAPPING
        binop_to_qbinop = QBIN_OP_MAPPING if relu_node is None else QBIN_RELU_OP_MAPPING
        qbin_op = binop_to_qbinop[bop_node.target]
        # prepare the args for quantized binary op
        # (x, y)
        qop_node_args = list(bop_node.args)
        # (x, y, scale, zero_point)
        # add scale and zero_point arguments for Tensor - Tensor operation
        if num_dq_nodes == 2:
            qop_node_args.extend([scale_node, zero_point_node])
        # insert a call to quantized binary op and remove the original binary op
        with model.graph.inserting_after(q_node):
            qop_node = create_node_from_old_node_preserve_meta(
                model.graph,
                ("call_function", qbin_op, tuple(qop_node_args), {}),
                bop_node)
            q_node.replace_all_uses_with(qop_node)

        # Step 3: Remove quantize node, binary op node, and relu node if any
        model.graph.erase_node(q_node)
        if relu_node is not None:
            model.graph.erase_node(relu_node)
        model.graph.erase_node(bop_node)
|
| 1015 |
+
|
| 1016 |
+
def special_pattern_replacement(model: GraphModule):
    """Lower "special pattern" reference-quantized nodes in ``model``'s graph in place.

    Scans for an output-quantization node ``q_node`` — either a
    ``torch.quantize_per_tensor`` call (static int8 path) or a
    ``to(torch.float16)`` method call (fp16 path) — and inspects its producer
    ``ref_node``.  For supported patterns it:

    1. swaps the reference module for its quantized counterpart via
       ``SPECIAL_PATTERN_LOWER_MODULE_MAP`` (static path only),
    2. reroutes the pattern's ``dequantize`` input(s) so it consumes the
       quantized tensor directly,
    3. erases ``q_node`` and, for lowerable ``call_function`` ops, replaces
       ``ref_node`` with its quantized operator carrying explicit
       ``output_scale`` / ``output_zero_point`` kwargs.

    Returns the mutated model.
    """
    modules = dict(model.named_modules(remove_duplicate=False))
    for n in model.graph.nodes:
        q_node = n
        # Two flavors of output quantization: a quantize_per_tensor call, or a
        # `.to(torch.float16)` method call with exactly one dtype argument.
        is_quantize = q_node.target == torch.quantize_per_tensor
        is_to_fp16 = q_node.op == "call_method" and q_node.target == "to" and \
            len(q_node.args) == 2 and q_node.args[1] == torch.float16
        if not (is_quantize or is_to_fp16):
            continue
        ref_node = q_node.args[0]
        # get output scale/zero_point/dtype from the quantize node
        # ref_node, scale_node, zero_point_node, dtype = q_node.args
        # TODO: add safety checks that users for the ref_node and dq_node needs to be one
        is_call_function, is_call_method, is_call_module = is_fixed_qparams_node(ref_node, modules)
        # fp16 output is not lowered for fixed-qparams ops; skip the pattern.
        if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
            # TODO: add a warning or error out here? (bc-breaking if error out)
            # warnings.warn(
            #     "Only reference patterns are currently supported for {dtype} dtype with {op} op"
            #     "".format(dtype=dtypes, op=ref_node))
            continue

        is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
        # Likewise, fp16 output is not lowered for "default" ops.
        if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
            # TODO: add a warning or error out here? (bc-breaking if error out)
            continue

        # This check includes all supported ops
        is_call_function, is_call_method, is_call_module = is_special_pattern_node(ref_node, modules)
        if not (is_call_module or is_call_function or is_call_method):
            continue
        assert len(ref_node.args) > 0 or len(ref_node.kwargs) > 0
        # The pattern's input is either the first positional arg or, failing
        # that, the first kwarg value; it may be one Node or a list/tuple.
        dq_node_or_nodes = ref_node.args[0] if len(ref_node.args) > 0 else next(iter(ref_node.kwargs.values()))
        assert isinstance(dq_node_or_nodes, (Node, tuple, list))
        is_dequantize = False
        if isinstance(dq_node_or_nodes, Node):
            is_dequantize = dq_node_or_nodes.op == 'call_method' and \
                dq_node_or_nodes.target == 'dequantize'
        elif isinstance(dq_node_or_nodes, (tuple, list)):
            is_dequantize = all(
                x.op == 'call_method' and x.target == 'dequantize'
                for x in dq_node_or_nodes)

        if not is_dequantize:
            continue

        # TODO: enable when we have patterns that need to swap the modules
        if is_call_module:
            ref_module = modules[ref_node.target]
            # Module swap only happens on the static (quantize_per_tensor) path.
            if type(ref_module) in SPECIAL_PATTERN_LOWER_MODULE_MAP and is_quantize:
                qmodule_cls = SPECIAL_PATTERN_LOWER_MODULE_MAP.get(type(ref_module))
                scale_node = q_node.args[1]
                zero_point_node = q_node.args[2]
                # scale/zero_point are stored as attributes on the root model.
                output_scale = getattr(model, scale_node.target)
                output_zero_point = getattr(model, zero_point_node.target)

                qmodule = qmodule_cls.from_reference(ref_module, output_scale, output_zero_point)  # type:ignore[union-attr]
                # replace reference module with quantized module
                parent_name, module_name = _parent_name(ref_node.target)
                setattr(modules[parent_name], module_name, qmodule)

        # reroute around dq node:
        dq_nodes: List[Node] = []
        if isinstance(dq_node_or_nodes, Node):
            dq_nodes = [dq_node_or_nodes]
        elif isinstance(dq_node_or_nodes, (tuple, list)):
            dq_nodes = list(dq_node_or_nodes)

        for dq_node in dq_nodes:
            dn_input = dq_node.args[0]
            ref_node.replace_input_with(dq_node, dn_input)

        # store q node args (everything after the input tensor)
        qnode_qparams = list(q_node.args)[1:]
        # replace uses of q node with input and remove q node
        q_node_input = q_node.args[0]
        q_node.replace_all_uses_with(q_node_input)
        model.graph.erase_node(q_node)

        is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
        if is_call_function:
            # pass scale/zero_point arguments from quantize_per_tensor to the default node operator
            # insert an op after the zero_point node so that the scale/zero_point
            # nodes are available
            qop = get_quantized_operator(ref_node.target)
            args = list(ref_node.args)
            kwargs = dict(ref_node.kwargs)
            if qop in QOP_TO_ARG_NAMES_TO_SKIP:
                # Drop kwargs the quantized operator does not accept.
                args_to_skip = QOP_TO_ARG_NAMES_TO_SKIP[qop]
                for arg in args_to_skip:
                    if arg in kwargs:
                        kwargs.pop(arg)
            kwargs["output_scale"] = qnode_qparams[0]
            kwargs["output_zero_point"] = qnode_qparams[1]
            with model.graph.inserting_after(qnode_qparams[1]):
                qop_node = create_node_from_old_node_preserve_meta(
                    model.graph,
                    ("call_function", qop, tuple(args), kwargs),
                    ref_node)
                ref_node.replace_all_uses_with(qop_node)
                model.graph.erase_node(ref_node)
        else:
            # remove scale/zero_point node for quantize node
            # NOTE(review): this rebinds the outer loop variable ``n``; looks
            # harmless since FX graph iteration doesn't depend on it — confirm.
            for n in qnode_qparams:
                if isinstance(n, Node):
                    model.graph.erase_node(n)

    return model
|
| 1123 |
+
|
| 1124 |
+
def _lower_getattr_tensor_metadta_op(model: GraphModule):
    """Rewrite ``model``'s graph in place so that tensor-metadata getattr
    nodes consume the quantized tensor directly, bypassing an immediately
    preceding ``dequantize`` method call when one is present.

    NOTE(review): the trailing "metadta" typo in the name is kept because
    callers (e.g. ``_lower_to_native_backend``) reference it by this name.
    """
    for node in model.graph.nodes:
        if not is_getattr_tensor_metadata_node(node):
            continue
        producer = node.args[0]
        if producer.op != "call_method" or producer.target != "dequantize":
            continue
        # Rewire the metadata op to read from the dequantize node's input.
        rewired = list(node.args)
        rewired[0] = producer.args[0]
        node.args = tuple(rewired)
|
| 1137 |
+
|
| 1138 |
+
def _lower_get_tensor_info_op(model: GraphModule):
    """Rewrite ``model``'s graph in place so that tensor-info nodes read from
    the quantized tensor directly, skipping an immediately preceding
    ``dequantize`` method call when one is present.
    """
    for info_node in model.graph.nodes:
        if is_get_tensor_info_node(info_node):
            upstream = info_node.args[0]
            feeds_from_dequantize = (
                upstream.op == "call_method" and upstream.target == "dequantize"
            )
            if feeds_from_dequantize:
                # Reconnect the info op to the dequantize node's own input.
                updated_args = list(info_node.args)
                updated_args[0] = upstream.args[0]
                info_node.args = tuple(updated_args)
|
| 1152 |
+
|
| 1153 |
+
def _lower_to_native_backend(
    model: GraphModule,
    qconfig_map: Dict[str, QConfigAny],
    node_name_to_scope: Dict[str, Tuple[str, type]]
) -> GraphModule:
    """Lower a quantized reference model (with reference quantized operator
    patterns) to the native backend in PyTorch (fbgemm/qnnpack).

    Both backends share the same operator signatures, so one lowering
    function serves both.

    Args:
        model: reference-quantized ``GraphModule``; its graph is rewritten in
            place by the passes below, then rebuilt by ``fold_weight``.
        qconfig_map: node-name -> qconfig mapping consulted by the
            qconfig-aware passes.
        node_name_to_scope: node-name -> (module path, module type) mapping,
            passed through to ``fold_weight``.

    Returns:
        The lowered ``GraphModule``.
    """
    # Module-based reference patterns.
    _lower_static_weighted_ref_module(model, qconfig_map)
    _lower_static_weighted_ref_module_with_two_inputs(model, qconfig_map)
    _lower_dynamic_weighted_ref_module(model)
    _lower_weight_only_weighted_ref_module(model)
    # Functional and binary-op reference patterns.
    _lower_static_weighted_ref_functional(model, qconfig_map)
    _lower_dynamic_weighted_ref_functional(model, qconfig_map)
    _lower_quantized_binary_op(model, qconfig_map)
    # Shape/metadata ops can read the quantized tensor directly.
    _lower_getattr_tensor_metadta_op(model)
    _lower_get_tensor_info_op(model)
    special_pattern_replacement(model)
    # Cleanup: eliminate dead code, fold weights, then recompile and lint.
    model.graph.eliminate_dead_code()
    model = fold_weight(model, node_name_to_scope)
    model.graph.eliminate_dead_code()
    model.recompile()
    model.graph.lint()
    return model
|
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py
ADDED
|
@@ -0,0 +1,1143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type, Callable
|
| 4 |
+
from torch.ao.quantization.quant_type import QuantType
|
| 5 |
+
import torch
|
| 6 |
+
import copy
|
| 7 |
+
import warnings
|
| 8 |
+
from torch.fx import (
|
| 9 |
+
GraphModule,
|
| 10 |
+
)
|
| 11 |
+
from torch.fx.graph import (
|
| 12 |
+
Graph,
|
| 13 |
+
Node,
|
| 14 |
+
Argument,
|
| 15 |
+
)
|
| 16 |
+
from ..utils import (
|
| 17 |
+
activation_is_statically_quantized,
|
| 18 |
+
weight_is_quantized,
|
| 19 |
+
get_qparam_dict,
|
| 20 |
+
_parent_name,
|
| 21 |
+
get_swapped_custom_module_class,
|
| 22 |
+
)
|
| 23 |
+
from ..qconfig import (
|
| 24 |
+
QConfigAny,
|
| 25 |
+
qconfig_equals
|
| 26 |
+
)
|
| 27 |
+
from ..qconfig_mapping import QConfigMapping
|
| 28 |
+
from .qconfig_mapping_utils import (
|
| 29 |
+
_generate_node_name_to_qconfig,
|
| 30 |
+
_compare_prepare_convert_qconfig_mappings,
|
| 31 |
+
_update_qconfig_for_fusion,
|
| 32 |
+
_is_qconfig_supported_by_dtype_configs,
|
| 33 |
+
_update_qconfig_for_qat,
|
| 34 |
+
)
|
| 35 |
+
from torch.ao.quantization.backend_config.utils import (
|
| 36 |
+
get_root_module_to_quantized_reference_module,
|
| 37 |
+
get_pattern_to_dtype_configs,
|
| 38 |
+
get_fused_module_classes,
|
| 39 |
+
get_qat_module_classes,
|
| 40 |
+
)
|
| 41 |
+
from torch.ao.quantization.backend_config import (
|
| 42 |
+
BackendConfig,
|
| 43 |
+
get_native_backend_config,
|
| 44 |
+
)
|
| 45 |
+
from torch.ao.quantization.observer import _is_activation_post_process
|
| 46 |
+
from .graph_module import (
|
| 47 |
+
_is_observed_module,
|
| 48 |
+
_is_observed_standalone_module,
|
| 49 |
+
)
|
| 50 |
+
from ._equalize import update_obs_for_equalization, convert_eq_obs
|
| 51 |
+
from torch.nn.utils.parametrize import type_before_parametrizations
|
| 52 |
+
from .utils import (
|
| 53 |
+
_get_module,
|
| 54 |
+
_is_custom_module_lstm,
|
| 55 |
+
_is_custom_module_mha,
|
| 56 |
+
assert_and_get_unique_device,
|
| 57 |
+
get_custom_module_class_keys,
|
| 58 |
+
create_getattr_from_value,
|
| 59 |
+
collect_producer_nodes,
|
| 60 |
+
graph_module_from_producer_nodes,
|
| 61 |
+
node_arg_is_weight,
|
| 62 |
+
)
|
| 63 |
+
from torch.ao.quantization.utils import (
|
| 64 |
+
is_per_channel,
|
| 65 |
+
to_underlying_dtype,
|
| 66 |
+
)
|
| 67 |
+
from torch.ao.quantization.quantize import (
|
| 68 |
+
_remove_qconfig,
|
| 69 |
+
)
|
| 70 |
+
from torch.ao.quantization.stubs import DeQuantStub
|
| 71 |
+
from .custom_config import (
|
| 72 |
+
ConvertCustomConfig,
|
| 73 |
+
PrepareCustomConfig,
|
| 74 |
+
)
|
| 75 |
+
from .lower_to_fbgemm import lower_to_fbgemm
|
| 76 |
+
# importing the lib so that the quantized_decomposed ops are registered
|
| 77 |
+
from ._decomposed import quantized_decomposed_lib # noqa: F401
|
| 78 |
+
import operator
|
| 79 |
+
|
| 80 |
+
# Public API of this module; everything else is an implementation detail.
__all__ = [
    "convert",
    "convert_custom_module",
    "convert_standalone_module",
    "convert_weighted_module",
]

# Activation dtypes accepted by the static-quantization branch when an
# observer is replaced with (decomposed) quantize/dequantize nodes.
SUPPORTED_QDTYPES = [
    torch.quint8,
    torch.qint8,
    torch.qint32,
    torch.uint8,
    torch.int8,
    torch.int16,
    torch.int32,
    torch.float8_e5m2,
    torch.float8_e4m3fn,
]

# Maps an observer qscheme to the decomposed choose_qparams op used on the
# dynamic-quantization path to compute (scale, zero_point) at runtime.
_QSCHEME_TO_CHOOSE_QPARAMS_OP = {
    torch.per_tensor_affine: torch.ops.quantized_decomposed.choose_qparams.tensor,
    torch.per_tensor_symmetric: torch.ops.quantized_decomposed.choose_qparams_symmetric.tensor,
}
|
| 103 |
+
|
| 104 |
+
def _replace_observer_with_quantize_dequantize_node_decomposed(
        model: torch.fx.GraphModule,
        node: Node,
        modules: Dict[str, torch.nn.Module],
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
    """ Replace activation_post_process module call node with quantize and
    dequantize node working with decomposed Tensor

    Before:
    ... -> observer_0(x) -> ...
    After:
    ... -> torch.ops.quantized_decomposed.quantize_per_tensor(x, ...) ->
    torch.ops.quantized_decomposed.dequantize_per_tensor() -> ...

    or quantize_per_channel and dequantize_per_channel

    There are three branches below: static quantization (dtype in
    SUPPORTED_QDTYPES and not dynamic), dynamic quantization (choose_qparams
    computes scale/zero_point at runtime), and float16 (not implemented).
    """
    graph = model.graph
    assert modules is not None
    assert isinstance(node.target, str)
    module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig)
    activation_post_process = modules[node.target]
    # An observer that knows how to convert itself takes full control here.
    if hasattr(activation_post_process, "convert"):
        activation_post_process.convert(model, node)
        return
    # skip replacing observers to quant/dequant nodes if the qconfigs of all
    # consumers and producers of this observer are None
    skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in
                           list(node.args) + list(node.users.keys()))
    if skip_replacement or not _is_conversion_supported(activation_post_process):
        # didn't find corresponding quantize op and info for the activation_post_process
        # so we just remove the observer
        with graph.inserting_before(node):
            node.replace_all_uses_with(node.args[0])
            graph.erase_node(node)
        return

    # otherwise, we can convert the activation_post_process module call to quantize/dequantize node

    # 1. extract the information from activation_post_process module for generating
    # the quantize and dequantize operator
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]

    is_dynamic = False
    if hasattr(activation_post_process, "is_dynamic"):
        is_dynamic = activation_post_process.is_dynamic  # type: ignore[assignment]

    if dtype in SUPPORTED_QDTYPES and (not is_dynamic):
        # TODO: probably should cleanup this condition check, it's hard
        # to reason about this if and the following elif

        # uint8/int8/int32 static quantization branch

        # 1. extract information for inserting q/dq node from activation_post_process
        node_type = "call_function"
        quantize_op : Optional[Callable] = None
        scale, zero_point = activation_post_process.calculate_qparams()  # type: ignore[attr-defined, operator]
        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined, arg-type]
            quantize_op = torch.ops.quantized_decomposed.quantize_per_channel.default
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_channel.default
            quant_min = activation_post_process.quant_min
            quant_max = activation_post_process.quant_max
            dtype_ = to_underlying_dtype(dtype)
            qparams = {
                "_scale_": scale,
                "_zero_point_": zero_point,
                "_axis_": ch_axis,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype_
            }
        else:
            quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.default
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.default
            # per-tensor qparams are plain Python scalars, not tensors
            scale = float(scale)
            zero_point = int(zero_point)
            quant_min = activation_post_process.quant_min  # type: ignore[attr-defined]
            quant_max = activation_post_process.quant_max  # type: ignore[attr-defined]
            dtype_ = to_underlying_dtype(dtype)
            qparams = {
                "_scale_": scale,
                "_zero_point_": zero_point,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype_
            }

        # 2. replace activation_post_process node with quantize and dequantize
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value_or_node in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                if key in ['_scale_', '_zero_point_'] and (not isinstance(value_or_node, (float, int))):
                    # For scale and zero_point values we register them as buffers in the root module.
                    # However, note that when the values are not tensors, as in the case of
                    # per_tensor quantization, they will be treated as literals.
                    # However, registering them as a node seems to cause issue with dynamo
                    # tracing where it may consider tensor overload as opposed to default.
                    # With extra check of scale and zero_point being scalar, it makes
                    # sure that the default overload can be used.
                    # TODO: maybe need more complex attr name here
                    qparam_node = create_getattr_from_value(
                        model, graph, module_path + prefix + key, value_or_node)
                    quantize_op_inputs.append(qparam_node)
                else:
                    # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
                    quantize_op_inputs.append(value_or_node)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            # use the same qparams from quantize op
            dq_inputs = [quantized_node] + quantize_op_inputs[1:]
            dequantized_node = graph.call_function(
                dequantize_op,
                tuple(dq_inputs),
                {}
            )

            def remap_fn(x):
                # redirect references to the observer node to the new dq node
                return dequantized_node if x is node else x

            # remap numeric_debug_handle
            for user_node in node.users:
                if "numeric_debug_handle" in user_node.meta:
                    numeric_debug_handle = user_node.meta["numeric_debug_handle"]
                    user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()}
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif is_dynamic:

        # uint8/int8/fp16 dynamic quantization

        # 1. extract information for inserting q/dq node from activation_post_process
        node_type = "call_function"
        quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.tensor
        # we only use choose_qparams for is_decomposed now,
        # but we should probably align the non-decomposed path with this as well,
        # and that can be done after we remove reduce_range flag
        # 1. extract qparams from activation_post_process module
        dtype_ = to_underlying_dtype(dtype)
        assert dtype_ in [torch.uint8, torch.int8], \
            "only uint8 and int8 are supported in reference flow for " \
            "dynamic quantization right now"
        quant_min = activation_post_process.quant_min  # type: ignore[attr-defined]
        quant_max = activation_post_process.quant_max  # type: ignore[attr-defined]
        qscheme = getattr(activation_post_process, "qscheme", torch.per_tensor_affine)  # type: ignore[attr-defined]
        eps = getattr(activation_post_process, "eps", torch.finfo(torch.float32).eps)  # type: ignore[attr-defined]
        # note: scale and zero_point are missing for quantize_per_tensor op
        # we'll need to get this from choose_qparams op, which we'll add after
        # this step
        qparams = {
            "_quant_min_": quant_min,
            "_quant_max_": quant_max,
            "_eps_": eps,
            "_dtype_": dtype_
        }

        choose_qparams_op = _QSCHEME_TO_CHOOSE_QPARAMS_OP[qscheme]
        # 2. insert choose_qparams op and update the qparams list
        with graph.inserting_before(node):
            input_node = node.args[0]
            choose_qparams_op_inputs = [node.args[0]]
            for key, value in qparams.items():
                # we have quant_min, quant_max and dtype, all should be stored
                # as literals
                choose_qparams_op_inputs.append(value)
            choose_qparams_node = graph.create_node(
                "call_function",
                choose_qparams_op,
                tuple(choose_qparams_op_inputs),
                {}
            )
            # choose_qparams returns (scale, zero_point)
            scale_node = graph.create_node(
                "call_function",
                operator.getitem,
                (choose_qparams_node, 0),
                {}
            )
            zero_point_node = graph.create_node(
                "call_function",
                operator.getitem,
                (choose_qparams_node, 1),
                {}
            )
            quant_min = qparams["_quant_min_"]
            quant_max = qparams["_quant_max_"]
            dtype = qparams["_dtype_"]
            # rebuild qparams with the runtime scale/zero_point graph nodes
            qparams = {
                "_scale_": scale_node,
                "_zero_point_": zero_point_node,
                "_quant_min_": quant_min,
                "_quant_max_": quant_max,
                "_dtype_": dtype
            }

        # 3. replace activation_post_process node to quantize and dequantize node
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value_or_node in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                if key in ['_scale_', '_zero_point_']:
                    # in this case we have a node in the graph since it's dynamically
                    # computed from the input, with choose_qparams op
                    qparam_node = value_or_node
                    quantize_op_inputs.append(qparam_node)
                else:
                    # for qparams that are not scale/zero_point (like axis, dtype) we
                    # store them as literals in the graph.
                    quantize_op_inputs.append(value_or_node)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            # use the same qparams from quantize op
            dq_inputs = [quantized_node] + quantize_op_inputs[1:]
            # need to use the tensor variant of this op, since scale and zero_point
            # from choose_qparam are Tensors, instead of float/int, this is to
            # prevent these nodes being traced away by downstream systems
            dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.tensor
            dequantized_node = graph.call_function(
                dequantize_op,
                tuple(dq_inputs),
                {}
            )

            def remap_fn(x):
                # redirect references to the observer node to the new dq node
                return dequantized_node if x is node else x

            # remap numeric_debug_handle
            for user_node in node.users:
                if "numeric_debug_handle" in user_node.meta:
                    numeric_debug_handle = user_node.meta["numeric_debug_handle"]
                    user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()}
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif dtype == torch.float16:
        raise NotImplementedError("decomposed to float16 op not implemented yet")

    # should not reach since we have checks in the beginning to make sure the
    # activation_post_process is supported
|
| 348 |
+
def _replace_observer_with_quantize_dequantize_node(
        model: torch.fx.GraphModule,
        node: Node,
        modules: Dict[str, torch.nn.Module],
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]) -> None:
    """ Replace activation_post_process module call node with quantize and
    dequantize node

    Before:
    ... -> observer_0(x) -> ...
    After:
    ... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ...

    Args:
        model: GraphModule being converted; scale/zero_point buffers are
            registered on it via ``create_getattr_from_value``
        node: the observer ``call_module`` node to replace
        modules: name -> module mapping used to resolve ``node.target`` to the
            observer instance
        node_name_to_scope: node name -> (module path, module type); used to
            build the attribute name for the registered qparam buffers
        node_name_to_qconfig: node name -> qconfig; a fully-None neighborhood
            means the observer is simply removed instead of converted
    """
    assert modules is not None
    assert isinstance(node.target, str)
    graph = model.graph
    module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig)
    activation_post_process = modules[node.target]
    # skip replacing observers to quant/dequant nodes if the qconfigs of all
    # consumers and producers of this observer are None
    skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in
                           list(node.args) + list(node.users.keys()))
    if skip_replacement or not _is_conversion_supported(activation_post_process):
        # didn't find corresponding quantize op and info for the activation_post_process
        # so we just remove the observer
        with graph.inserting_before(node):
            node.replace_all_uses_with(node.args[0])
            graph.erase_node(node)
        return

    # otherwise, we can convert the activation_post_process module call to quantize/dequantize node
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]

    is_dynamic = False
    if hasattr(activation_post_process, "is_dynamic"):
        is_dynamic = activation_post_process.is_dynamic  # type: ignore[attr-defined, assignment]

    if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.float8_e5m2, torch.float8_e4m3fn] and \
            (not is_dynamic):
        # TODO: probably should cleanup this condition check, it's hard
        # to reason about this if and the following elif

        # uint8/int8/int32 static quantization branch

        # 1. extract the information from activation_post_process module for generating
        # the quantize and dequantize operator
        node_type = "call_function"
        quantize_op : Optional[Callable] = None
        scale, zero_point = activation_post_process.calculate_qparams()  # type: ignore[attr-defined, operator]
        if is_per_channel(activation_post_process.qscheme):  # type: ignore[attr-defined]
            ch_axis = int(activation_post_process.ch_axis)  # type: ignore[attr-defined, arg-type]
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}
            quantize_op = torch.quantize_per_channel
        else:
            # per-tensor: scale/zero_point are 0-dim tensors here; convert to
            # Python scalars as expected by torch.quantize_per_tensor
            scale = float(scale)
            zero_point = int(zero_point)
            qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
            quantize_op = torch.quantize_per_tensor

        # 2. replace activation_post_process node with quantize and dequantize
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value_or_node in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                if key in ['_scale_', '_zero_point_']:
                    # For scale and zero_point values we register them as buffers in the root module.
                    # TODO: maybe need more complex attr name here
                    qparam_node = create_getattr_from_value(
                        model, graph, module_path + prefix + key, value_or_node)
                    quantize_op_inputs.append(qparam_node)
                else:
                    # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
                    quantize_op_inputs.append(value_or_node)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif is_dynamic:

        # uint8/int8/fp16 dynamic quantization branch

        node_type = "call_function"
        quantize_op = torch.quantize_per_tensor_dynamic
        # TODO: get reduce range from observer
        # reduce_range = activation_post_process.reduce_range
        reduce_range = torch.backends.quantized.engine in ("fbgemm", "x86")
        qparams = {"_dtype_": dtype, "_reduce_range_": reduce_range}

        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value in qparams.items():
                # dynamic quant qparams (dtype, reduce_range) are plain
                # literals, never graph nodes
                quantize_op_inputs.append(value)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)
    elif dtype == torch.float16:
        # fp16 "quantization" is just a cast: x.to(torch.float16).dequantize()
        node_type = "call_method"
        quantize_op = "to"  # type: ignore[assignment]
        qparams = {"_dtype_": dtype}
        with graph.inserting_before(node):
            input_node = node.args[0]
            quantize_op_inputs = [input_node]
            for key, value in qparams.items():
                # TODO: we can add the information of whether a value needs to
                # be registered as an attribute in qparams dict itself
                quantize_op_inputs.append(value)

            quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {})
            dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
            node.replace_all_uses_with(dequantized_node)
            graph.erase_node(node)

    # should not reach since we have checks in the beginning to make sure the
    # activation_post_process is supported
|
| 469 |
+
|
| 470 |
+
# this is a temporary hack for custom module, we may want to implement
|
| 471 |
+
# this properly after the custom module class design is finalized
|
| 472 |
+
# TODO: DeQuantStubs are currently inserted only after custom module LSTM, while observers are inserted
|
| 473 |
+
# after all other custom modules. In the future, we should simply insert QuantStubs before and DeQuantStubs
|
| 474 |
+
# after custom modules in general, and replace these with "quantize" and "dequantize" nodes respectively.
|
| 475 |
+
def _replace_observer_or_dequant_stub_with_dequantize_node(node: Node, graph: Graph) -> None:
    """Replace an observer (or DeQuantStub) node that follows a custom module
    call with a plain ``dequantize`` node.

    The observer node itself is erased; its single input (the custom module
    call) takes over its uses, and a fresh ``dequantize`` node is inserted
    after the custom module call via ``_insert_dequantize_node``.

    Args:
        node: the observer / DeQuantStub ``call_module`` node to remove
        graph: the graph containing ``node``
    """
    call_custom_module_node = node.args[0]
    # Fix: the original assert message was garbled ("Expecting the for call
    # custom module node ...").
    assert isinstance(call_custom_module_node, Node), \
        f"Expecting the argument of the observer node to be a Node, but got {call_custom_module_node}"
    node.replace_all_uses_with(call_custom_module_node)
    graph.erase_node(node)
    _insert_dequantize_node(call_custom_module_node, graph)
|
| 482 |
+
|
| 483 |
+
def _is_conversion_supported(activation_post_process: torch.nn.Module) -> bool:
    """Return True when this activation_post_process can be lowered to a
    quantize/dequantize pair: dynamic observers, fp16 casts, and static
    observers whose dtype is one of SUPPORTED_QDTYPES.
    """
    dtype = activation_post_process.dtype  # type: ignore[attr-defined]
    is_dynamic = getattr(activation_post_process, "is_dynamic", False)

    if is_dynamic:
        return True
    if dtype == torch.float16:
        return True
    # static case: only the supported quantized dtypes can be converted
    return dtype in SUPPORTED_QDTYPES  # type: ignore[return-value]
|
| 495 |
+
|
| 496 |
+
def _has_none_qconfig(node: Argument, node_name_to_qconfig: Dict[str, QConfigAny]) -> bool:
|
| 497 |
+
""" Check if a node has a qconfig of None, i.e. user requested to not quantize
|
| 498 |
+
the node
|
| 499 |
+
"""
|
| 500 |
+
return isinstance(node, Node) and node.name in node_name_to_qconfig and node_name_to_qconfig[node.name] is None
|
| 501 |
+
|
| 502 |
+
def _run_weight_observers(observed: GraphModule, backend_config: BackendConfig) -> None:
    """ Extract the subgraph that produces the weight for dynamic quant
    or weight only quant node and run the subgraph to observe the weight.
    Note that the observers of dynamic quant or weight only quant ops are
    run during the convert step.
    """
    for node in observed.graph.nodes:
        if node.op != "call_function":
            continue
        for candidate_arg in node.args:
            # only weight arguments feed the observer subgraph
            if not candidate_arg or not node_arg_is_weight(node, candidate_arg):
                continue
            producer_nodes = collect_producer_nodes(candidate_arg)
            if producer_nodes is None:
                continue
            # build a tiny GraphModule out of the producer chain and execute
            # it so the weight observer records statistics
            observer_gm = graph_module_from_producer_nodes(observed, producer_nodes)
            observer_gm()
|
| 522 |
+
|
| 523 |
+
def _maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph) -> None:
|
| 524 |
+
""" If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
|
| 525 |
+
we'll recursively remove the dequantize Node
|
| 526 |
+
"""
|
| 527 |
+
if isinstance(arg, Node) and \
|
| 528 |
+
arg.op == "call_method" and \
|
| 529 |
+
arg.target == "dequantize":
|
| 530 |
+
quantize_node = arg.args[0]
|
| 531 |
+
# we only replace the specific use since dequantize could be used by other nodes
|
| 532 |
+
# as well
|
| 533 |
+
node.replace_input_with(arg, quantize_node)
|
| 534 |
+
elif isinstance(arg, (list, tuple)):
|
| 535 |
+
for arg_element in arg:
|
| 536 |
+
_maybe_recursive_remove_dequantize(arg_element, node, graph)
|
| 537 |
+
elif isinstance(arg, dict):
|
| 538 |
+
for arg_element in arg.values():
|
| 539 |
+
_maybe_recursive_remove_dequantize(arg_element, node, graph)
|
| 540 |
+
else:
|
| 541 |
+
warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")
|
| 542 |
+
|
| 543 |
+
def _get_module_path_and_prefix(
        obs_node: Node,
        node_name_to_scope: Dict[str, Tuple[str, type]],
        node_name_to_qconfig: Dict[str, QConfigAny]) -> Tuple[str, str]:
    """ Given an observer node, get the `Scope` or the fully qualified name for
    the submodule containing the observed node, also return a prefix of "_input"
    when the observed node is an input of a F.linear op, and not the output of another
    quantized op.
    TODO: this logic is hacky, we should think about how to remove it or make it more
    general
    """
    observed_node = obs_node.args[0]
    # an observer can be inserted for both input of the next operator or output of the previous
    # operator (they can be the same)
    # this flag identifies if the observer is inserted only because the observed node is
    # the input of the next operator
    assert isinstance(observed_node, Node), \
        f"Expecting observed node to be a Node, but got {observed_node}"
    # NOTE: tri-state value — True/False when the observed node has a qconfig
    # entry, None when it has no entry; both False and None take the else
    # branch below
    is_input_observer_only = node_name_to_qconfig[observed_node.name] is None \
        if observed_node.name in node_name_to_qconfig else None
    if is_input_observer_only:
        # if the quantize function is at the input of op, then we find the first user of the observer_node
        # to get the path. If a linear call_function is in the user list, we return the first instance
        # of linear node to get the FQN.
        users = list(obs_node.users)
        first_linear_use_or_first_use = users[0] if users else None
        linear_node = None
        for n in users:
            if n.op == "call_function" and n.target == torch.nn.functional.linear:
                linear_node = n
                break
        if linear_node:
            first_linear_use_or_first_use = linear_node
        prefix = "_input"
    else:
        # if the quantize function is at the output of the op, we use the observer input node to get the path
        first_linear_use_or_first_use = observed_node
        prefix = ""

    if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
        module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
    else:
        # TODO: it's not used, so actually we can skip quantization
        # but this requires changing return type of quantize_node
        # we can fix it later if needed
        module_path = ""
    return module_path, prefix
|
| 590 |
+
|
| 591 |
+
def _insert_dequantize_node(
|
| 592 |
+
node: Node,
|
| 593 |
+
graph: Graph) -> None:
|
| 594 |
+
""" Inserts dequantize node for `node` in `graph`
|
| 595 |
+
"""
|
| 596 |
+
with graph.inserting_after(node):
|
| 597 |
+
dequantize_node = graph.call_method("dequantize", (node,))
|
| 598 |
+
for user_node in dict(node.users):
|
| 599 |
+
if user_node is not dequantize_node:
|
| 600 |
+
user_node.replace_input_with(node, dequantize_node)
|
| 601 |
+
|
| 602 |
+
def _maybe_get_observer_for_node(
        node: Node,
        modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
    """
    If the node is observed, return the observer
    instance. Otherwise, return None.
    """
    for user in node.users:
        if user.op != 'call_module':
            continue
        candidate = modules[str(user.target)]
        # the first activation_post_process user wins
        if _is_activation_post_process(candidate):
            return candidate
    return None
|
| 616 |
+
|
| 617 |
+
def convert_standalone_module(
        node: Node,
        modules: Dict[str, torch.nn.Module],
        model: torch.fx.GraphModule,
        is_reference: bool,
        backend_config: Optional[BackendConfig]) -> None:
    """ Converts a observed standalone module to a quantized standalone module by calling
    the fx convert api, currently using the same `is_reference` flag as parent, but we may
    changing this behavior in the future (e.g. separating quantization and lowering for
    standalone module as well)

    Args:
        - node: The call_module node of the observed standalone module
        - modules: named_module of original model
        - model: original model
        - is_reference: a flag from parent provided by user to decide if we want to
          produce a reference model or a fbgemm/qnnpack model
        - backend_config: backend configuration of the target backend of quantization
    """
    # TODO: remove is_reference flag
    if is_reference:
        convert_fn = torch.ao.quantization.quantize_fx.convert_to_reference_fx
    else:
        convert_fn = torch.ao.quantization.quantize_fx.convert_fx  # type: ignore[attr-defined]
    # We know that observed standalone module is a GraphModule since
    # it's produced by us
    observed_standalone_module : GraphModule = modules[str(node.target)]  # type: ignore[assignment]
    sm_input_quantized_idxs = \
        observed_standalone_module \
        .meta["_observed_graph_module_attrs"].standalone_module_input_quantized_idxs
    # remove the dequantize nodes for inputs the standalone module expects to
    # receive already quantized, rerouting the quantized value directly
    args = list(node.args)
    for idx in range(len(args)):
        if idx in sm_input_quantized_idxs:
            arg = args[idx]
            if arg.op == "call_method" and arg.target == "dequantize":  # type: ignore[union-attr]
                quantize_node = arg.args[0]  # type: ignore[union-attr]
                node.replace_input_with(arg, quantize_node)
                # erase the dequantize only once nothing else consumes it
                if len(arg.users) == 0:  # type: ignore[union-attr]
                    model.graph.erase_node(arg)
    # add dequantize node for output
    sm_output_quantized_idxs = \
        observed_standalone_module \
        .meta["_observed_graph_module_attrs"].standalone_module_output_quantized_idxs
    if len(sm_output_quantized_idxs) > 0:
        # Fix: the message used to be split into a second, no-op string
        # statement, so only "Currently only quantized" would ever be shown
        assert sm_output_quantized_idxs[0] == 0, \
            "Currently only quantized output idxs = [0] is supported"

        # if it's non-empty, then it means the output is kept in quantized form
        # we'll just add a dequantize node after this node
        _insert_dequantize_node(node, model.graph)

    # TODO: allow convert_custom_config to override backend_config
    # for standalone module
    quantized_standalone_module = convert_fn(
        observed_standalone_module,
        backend_config=backend_config)
    parent_name, name = _parent_name(node.target)
    # update the modules dict
    setattr(modules[parent_name], name, quantized_standalone_module)
    modules[str(node.target)] = quantized_standalone_module
|
| 678 |
+
|
| 679 |
+
def convert_weighted_module(
        node: Node,
        modules: Dict[str, torch.nn.Module],
        observed_node_names: Set[str],
        node_name_to_qconfig: Dict[str, QConfigAny],
        backend_config: BackendConfig,
        is_decomposed: bool = False,
        is_reference: bool = False,
) -> None:
    """ Convert a weighted module to reference quantized module in the model
    If the QConfig of a QAT module is not set, the module will still be converted to
    a float module.

    Args:
        - node: The call_module node of the observed standalone module
        - modules: named_module of original model
        - observed_node_names: names for the set of observed fx node, we can skip
          this conversion if the node is not observed
        - node_name_to_qconfig: node name -> qconfig; a None qconfig skips the swap
        - backend_config: used to look up QAT module classes, supported dtype
          configs and the float -> reference-quantized module mapping
        - is_decomposed: recorded in the weight qparams dict; together with
          is_reference it decides whether the extra weight fake_quant call below
          can be skipped (PT2 QAT flow)
        - is_reference: only consulted together with is_decomposed/is_qat for
          that same skip decision
    """
    original_module = modules[str(node.target)]
    qconfig: QConfigAny = original_module.qconfig  # type: ignore[assignment]
    weight_post_process = None
    qat_module_classes = get_qat_module_classes(backend_config)

    if isinstance(
            original_module,
            qat_module_classes):
        # Converting qat module to a float module, we need to attach
        # weight fake_quant to the module, weight fake_quant is assumed to be run during
        # QAT so we don't need to run it again here
        weight_post_process = original_module.weight_fake_quant
        original_module = original_module.to_float()  # type: ignore[operator]
        # change qat module to float module
        parent_name, name = _parent_name(node.target)
        setattr(modules[parent_name], name, original_module)

    is_observed = node.name in observed_node_names
    # If a qconfig is not defined for this node, then skip converting to a reference module
    if qconfig is None or _has_none_qconfig(node, node_name_to_qconfig) or not is_observed:
        return

    # skip converting to reference quantized module if the qconfig is not supported
    pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
    dtype_configs = pattern_to_dtype_configs.get(type(original_module), [])
    if not _is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs):
        return

    # TODO: rename weight_is_statically_quantized to weight_is_int8_quantized
    is_weight_quantized = weight_is_quantized(qconfig)

    # the condition for swapping the module to reference quantized module is:
    # weights need to be quantized
    if not is_weight_quantized:
        return

    fused_module = None
    float_module = original_module
    # extract the individual float_module and fused module
    if isinstance(original_module, torch.ao.nn.intrinsic._FusedModule):
        fused_module = float_module
        float_module = fused_module[0]  # type: ignore[index]

    # TODO: move this to the reference quantized module
    # weight_qparams or weight_qparams dict
    wq_or_wq_dict = {"is_decomposed": is_decomposed}
    if isinstance(float_module, torch.nn.RNNCellBase):
        # RNN cells carry two weights; observe each with its own observer
        weight_post_process_ih = qconfig.weight()  # type: ignore[union-attr, operator]
        weight_post_process_hh = qconfig.weight()  # type: ignore[union-attr, operator]
        weight_post_process_ih(float_module.weight_ih)
        weight_post_process_hh(float_module.weight_hh)
        weight_qparams_ih = get_qparam_dict(weight_post_process_ih)
        weight_qparams_hh = get_qparam_dict(weight_post_process_hh)
        wq_or_wq_dict.update({
            "weight_ih": weight_qparams_ih,
            "weight_hh": weight_qparams_hh,
        })
    elif isinstance(float_module, (torch.nn.LSTM, torch.nn.GRU)):
        # format for wq_or_wq_dict (flattened attributes):
        # {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...}
        for wn in float_module._flat_weights_names:
            if hasattr(float_module, wn) and wn.startswith("weight"):
                weight = getattr(float_module, wn)
                weight_post_process = qconfig.weight()  # type: ignore[union-attr, operator]
                if weight_post_process.dtype == torch.qint8:  # type: ignore[union-attr]
                    weight_post_process(weight)  # type: ignore[operator, misc]
                wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process)
    else:
        # weight_post_process is None means the original module is not a QAT module
        # we need to get weight_post_process from qconfig in this case
        is_ptq = weight_post_process is None
        if is_ptq:
            weight_post_process = qconfig.weight()  # type: ignore[union-attr, operator]
            device = assert_and_get_unique_device(float_module)
            if device:
                weight_post_process.to(device)

        # Call weight observer/fake_quant at least once to ensure the scales and zero points
        # have the right shapes. Note: there are two cases where we don't have to do this:
        #
        # (1) QAT: The model's forward method already calls the weight observer/fake_quant,
        #     and this typically happens during training, so we don't need to do it here.
        #
        # (2) Non-reference (lowered) case: The quantized module's from_float method already
        #     calls the weight observer/fake_quant, so we don't have to do it here.
        #
        # Currently we ignore both cases and call the weight observer/fake_quant here
        # regardless, which is technically incorrect. For (1), this is mainly to preserve BC
        # in test code, which may not always train before convert. In the future, we should
        # break BC for these two cases. See https://github.com/pytorch/pytorch/issues/73941.
        #
        # For PT2, however, we don't need to preserve BC here, so we can skip this hack
        # for QAT. We identify this case as (is_decomposed + is_reference + is_qat).
        # Note that we still need it for PTQ in the PT2 flow since the model's forward
        # method doesn't call the weight observer.
        is_qat = not is_ptq
        if not (is_decomposed and is_reference and is_qat):
            weight_post_process(float_module.weight)  # type: ignore[operator]

        wq_or_wq_dict.update(get_qparam_dict(weight_post_process))

    # We use the same reference module for all modes of quantization: static, dynamic, weight_only
    # root_module_to_quantized_reference_module: module mapping from root (floating point) module class
    # to quantized reference module class, e.g. nn.Conv2d to nn.quantized._reference.Conv2d
    root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
    ref_qmodule_cls = root_module_to_quantized_reference_module.get(type_before_parametrizations(float_module), None)
    assert (
        ref_qmodule_cls is not None
    ), f"No reference quantized module class configured for {type_before_parametrizations(float_module)}"
    ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict)  # type: ignore[attr-defined]
    if fused_module is not None:
        # swap only the first element of the fused sequence; the rest stays float
        fused_module[0] = ref_qmodule  # type: ignore[operator]
    else:
        parent_name, name = _parent_name(node.target)
        setattr(modules[parent_name], name, ref_qmodule)
|
| 813 |
+
|
| 814 |
+
def _remove_previous_dequantize_in_custom_module(node: Node, prev_node: Node, graph: Graph) -> None:
|
| 815 |
+
"""
|
| 816 |
+
Given a custom module `node`, if the previous node is a dequantize, reroute the custom as follows:
|
| 817 |
+
|
| 818 |
+
Before: quantize - dequantize - custom_module
|
| 819 |
+
After: quantize - custom_module
|
| 820 |
+
\\ - dequantize
|
| 821 |
+
"""
|
| 822 |
+
# expecting the input node for a custom module node to be a Node
|
| 823 |
+
assert isinstance(prev_node, Node), \
|
| 824 |
+
f"Expecting the argument for custom module node to be a Node, but got {prev_node}"
|
| 825 |
+
if prev_node.op == "call_method" and prev_node.target == "dequantize":
|
| 826 |
+
node.replace_input_with(prev_node, prev_node.args[0])
|
| 827 |
+
# Remove the dequantize node if it doesn't have other users
|
| 828 |
+
if len(prev_node.users) == 0:
|
| 829 |
+
graph.erase_node(prev_node)
|
| 830 |
+
|
| 831 |
+
def convert_custom_module(
        node: Node,
        graph: Graph,
        modules: Dict[str, torch.nn.Module],
        custom_module_class_mapping: Dict[QuantType, Dict[Type, Type]],
        statically_quantized_custom_module_nodes: Set[Node]) -> None:
    """ Converts an observed custom module to a quantized custom module based on
    `custom_module_class_mapping`
    For static quantization, we'll also remove the previous `dequantize` node and
    attach the observer node for output to the module, the observer for the node
    will be converted to a dequantize node instead of quantize-dequantize pairs
    later in the graph. In the end we would have a quantized custom module that
    has the same interface as a default quantized module in nn.quantized namespace,
    i.e. quantized input and quantized output.

    Args:
        - node: The call_module node of the observed standalone module
        - graph: The graph containing the node
        - modules: named_module of original model
        - custom_module_class_mapping: mapping from observed custom module class to
          quantized custom module class, used to swap custom modules
        - statically_quantized_custom_module_nodes: we'll add the custom module node
          if we find it is statically quantized, this will be used later when converting
          observers to quant/dequant node pairs, if the observed node is a statically
          quantized custom module nodes, we'll convert the observer to a dequantize node,
          this is to keep the interface the same as the default quantized module.
          TODO: maybe we want to redesign this part to align with reference model design
          as well, but there has been some discussions around the interface, so we can do
          it later.
    """
    observed_custom_module = modules[str(node.target)]
    # Fix: removed the unused `maybe_obs = _maybe_get_observer_for_node(...)`
    # local; the observer is (re)fetched below only in the branch that needs it.
    qconfig = observed_custom_module.qconfig
    if activation_is_statically_quantized(qconfig):
        statically_quantized_custom_module_nodes.add(node)
        if _is_custom_module_lstm(node, modules):
            # The inputs are tuples in the form (input, (hidden0, hidden1))
            # Ensure all three input nodes are quantized
            assert (
                len(node.args) == 2 and
                isinstance(node.args[1], tuple) and
                len(node.args[1]) == 2
            )
            (inputs, (hidden0, hidden1)) = node.args  # type: ignore[misc]
            assert isinstance(inputs, Node)
            assert isinstance(hidden0, Node)
            assert isinstance(hidden1, Node)
            _remove_previous_dequantize_in_custom_module(node, inputs, graph)
            _remove_previous_dequantize_in_custom_module(node, hidden0, graph)
            _remove_previous_dequantize_in_custom_module(node, hidden1, graph)
        elif _is_custom_module_mha(node, modules):
            # Inputs are in the form (query, key, value)
            # TODO: This is the first step in enabling the full fx custom module
            # quantization path for MultiheadAttention, and only covers the inputs
            # to the module.
            # Additional handling is yet to be implemented for the outputs, similar
            # to LSTM custom module
            assert len(node.args) == 3
            query, key, value = node.args
            assert isinstance(query, Node)
            assert isinstance(key, Node)
            assert isinstance(value, Node)
            _remove_previous_dequantize_in_custom_module(node, query, graph)
            _remove_previous_dequantize_in_custom_module(node, key, graph)
            _remove_previous_dequantize_in_custom_module(node, value, graph)
        else:
            # remove the previous dequant node to ensure the inputs are quantized
            arg = node.args[0]
            assert isinstance(arg, Node)
            _remove_previous_dequantize_in_custom_module(node, arg, graph)
            # absorb the following observer into the module conversion
            activation_post_process = _maybe_get_observer_for_node(node, modules)
            assert activation_post_process is not None
            observed_custom_module.activation_post_process = activation_post_process

    # swap the observed custom module to quantized custom module
    quantized_custom_module_class = get_swapped_custom_module_class(
        observed_custom_module, custom_module_class_mapping, qconfig)
    quantized_custom_module = \
        quantized_custom_module_class.from_observed(observed_custom_module)
    parent_name, name = _parent_name(node.target)
    setattr(modules[parent_name], name, quantized_custom_module)
|
| 913 |
+
|
| 914 |
+
def convert(
        model: GraphModule, is_reference: bool = False,
        convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
        is_standalone_module: bool = False,
        _remove_qconfig_flag: bool = True,
        qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
        backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
        is_decomposed: bool = False) -> GraphModule:
    """
    We will convert an observed model (a module with observer calls) to a reference
    quantized model, the rule is simple:
    1. for each observer module call in the graph, we'll convert it to calls to
       quantize and dequantize functions based on the observer instance
    2. for weighted operations like linear/conv, we need to convert them to reference
       quantized module, this requires us to know whether the dtype configured for the
       weight is supported in the backend, this is done in prepare step and the result
       is stored in observed_node_names, we can decide whether we need to swap the
       module based on this set

    Args:
       * `is_standalone_module`: when this flag is True, it means we are quantizing
       a submodule that is not inlined in parent module, and will be quantized
       separately as one unit.

       * `is_decomposed`: a boolean flag to indicate whether we want to use the
       quantize operator for decomposed quantized tensor
       (torch.ops.quantized_decomposed.quantize_per_tensor) or default/standalone
       quantized tensor (torch.quantize_per_tensor)

    Returns:
         a quantized standalone module, whether input/output is quantized is
         specified by prepare_custom_config, with
         input_quantized_idxs, output_quantized_idxs, please
         see docs for :func:`~torch.ao.quantization.prepare_fx` for details
    """
    # --- Normalize config arguments: accept legacy dicts but emit deprecation
    # warnings and convert them to the typed config objects. ---
    if convert_custom_config is None:
        convert_custom_config = ConvertCustomConfig()

    if isinstance(convert_custom_config, dict):
        warnings.warn(
            "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
            "in a future version. Please pass in a ConvertCustomConfig instead.",
            FutureWarning,
            stacklevel=2,
        )
        convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

    if isinstance(qconfig_mapping, dict):
        warnings.warn(
            "Passing a QConfig dictionary to convert is deprecated and will not be supported "
            "in a future version. Please pass in a QConfigMapping instead.",
            FutureWarning,
            stacklevel=2,
        )
        qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
    # Deep-copy so that in-place updates below (e.g. QAT/fusion adjustments)
    # never mutate the caller's mapping.
    qconfig_mapping = copy.deepcopy(qconfig_mapping)
    assert qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping)

    if isinstance(backend_config, dict):
        warnings.warn(
            "Passing a backend_config_dict to prepare is deprecated and will not be supported "
            "in a future version. Please pass in a BackendConfig instead.",
            FutureWarning,
            stacklevel=2,
        )
        backend_config = BackendConfig.from_dict(backend_config)

    if backend_config is None:
        backend_config = get_native_backend_config()

    # --- Pull the state that prepare_fx stashed on the model. ---
    assert _is_observed_module(model), \
        'incoming model must be produced by prepare_fx'
    observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"]
    node_name_to_scope: Dict[str, Tuple[str, type]] = observed_graph_module_attrs.node_name_to_scope
    prepare_custom_config: PrepareCustomConfig = observed_graph_module_attrs.prepare_custom_config
    observed_node_names: Set[str] = observed_graph_module_attrs.observed_node_names
    node_name_to_qconfig: Dict[str, QConfigAny] = observed_graph_module_attrs.node_name_to_qconfig  # type: ignore[assignment]

    # mapping from fully qualified module name to module instance
    # for example,
    # {
    #   '': Model(...),
    #   'linear': Linear(...),
    #   'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
    # }
    # We use remove_duplicate=False here because torch.cat uses
    # the same activation_post_process module instance but different names
    modules = dict(model.named_modules(remove_duplicate=False))

    # TODO refactor this code once we update the prepare logic to have additional information on
    # which graph nodes have been observed and share that with convert to decide which observers to ignore.
    if qconfig_mapping:
        # A convert-time qconfig_mapping may only *disable* quantization
        # (set entries to None) relative to what was used at prepare time;
        # the loop below enforces that invariant.
        prepare_qconfig_mapping: QConfigMapping = observed_graph_module_attrs.qconfig_mapping  # type: ignore[assignment]
        modules_copy = copy.deepcopy(modules)

        if observed_graph_module_attrs.is_qat:
            _update_qconfig_for_qat(qconfig_mapping, backend_config)
        _update_qconfig_for_fusion(model, qconfig_mapping)

        _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping, qconfig_mapping)  # type: ignore[arg-type]
        convert_node_name_to_qconfig = _generate_node_name_to_qconfig(
            model, modules_copy, model.graph, qconfig_mapping, node_name_to_scope)
        # check the convert_node_name_to_qconfig generated and ensure that
        # all the values either match what was set in prepare node_name_to_qconfig
        # or are set to None in the convert_node_name_to_qconfig.
        for k, v in node_name_to_qconfig.items():
            assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig'
            if convert_node_name_to_qconfig[k] is not None:
                assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \
                    f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \
                    f"but {v} was updated to {convert_node_name_to_qconfig[k]}"
        node_name_to_qconfig = convert_node_name_to_qconfig

    custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)
    custom_module_class_mapping = convert_custom_config.observed_to_quantized_mapping

    if observed_graph_module_attrs.equalization_node_name_to_qconfig is not None:
        # If we want to do equalization then do the following:
        # Calculate the equalization scale, update the observers with the scaled
        # inputs, and scale the weight
        weight_eq_obs_dict = update_obs_for_equalization(model, modules)
        convert_eq_obs(model, modules, weight_eq_obs_dict)

    # always run weight observers in the top level forward method
    # for dynamic quant ops or weight only quant ops
    _run_weight_observers(model, backend_config)

    # NOTE(review): graph_inputs is collected but not referenced later in this
    # function — looks like dead state kept for compatibility; verify before removing.
    graph_inputs: List[str] = []
    for node in model.graph.nodes:
        if node.op == 'placeholder':
            graph_inputs.append(node.name)

    # additional state to override inputs to be quantized, if specified
    # by the user
    placeholder_node_seen_cnt = 0
    input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
    output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes

    root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
    # convert tuples so that it can work with isinstance(module, tuple_of_classes)
    root_module_classes = tuple(root_module_to_quantized_reference_module.keys())
    qat_module_classes = get_qat_module_classes(backend_config)
    fused_module_classes = get_fused_module_classes(backend_config)
    statically_quantized_custom_module_nodes: Set[Node] = set()

    # Iterate over a snapshot of the nodes (list(...)) because the branches
    # below insert/remove nodes while walking the graph.
    for node in list(model.graph.nodes):
        if node.op == 'placeholder':
            cur_placeholder_node_idx = placeholder_node_seen_cnt
            placeholder_node_seen_cnt += 1
            if cur_placeholder_node_idx in input_quantized_idxs:
                # Inputs are assumed to be quantized if the user specified the
                # input_quantized_idxs override.
                # we need to dequantize the inputs since all operators took
                # floating point inputs in reference quantized models
                _insert_dequantize_node(node, model.graph)
        elif node.op == "output":
            # If the argument is empty we don't need to do anything
            if len(output_quantized_idxs) == 0:
                continue
            # Result are kept quantized if the user specified the
            # output_quantized_idxs override.
            # Remove the dequantize operator for the node in the end if any
            return_node = node
            output = node.args[0]
            # outputs can be Node, list, tuple, dict, other cases are not supported yet
            if isinstance(output, (list, tuple)):
                for idx in output_quantized_idxs:
                    _maybe_recursive_remove_dequantize(output[idx], return_node, model.graph)
            elif isinstance(output, (Node, dict)):
                # we treat dict as a single argument currently, but it can be extended
                # to support {"key": dtype} after we change output_quantized_idxs to
                # dict
                if 0 in output_quantized_idxs:
                    _maybe_recursive_remove_dequantize(output, return_node, model.graph)
            else:
                warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}")
        elif node.op == "call_module":
            mod = _get_module(node, modules)
            assert mod is not None
            if _is_activation_post_process(mod):
                observed_node = node.args[0]
                if observed_node in statically_quantized_custom_module_nodes:
                    # the custom module conversion already absorbed this
                    # observer, so just drop it to a dequantize
                    _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
                else:
                    if is_decomposed:
                        _replace_observer_with_quantize_dequantize_node_decomposed(
                            model, node, modules, node_name_to_scope,
                            node_name_to_qconfig)
                    else:
                        _replace_observer_with_quantize_dequantize_node(
                            model, node, modules, node_name_to_scope,
                            node_name_to_qconfig)
            elif isinstance(mod, DeQuantStub):
                _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph)
            elif _is_observed_standalone_module(mod):
                convert_standalone_module(
                    node, modules, model, is_reference, backend_config)
            # below this point `type_before_parametrizations` is used
            # instead of `type` to handle situations with fx quant + sparsity
            elif type_before_parametrizations(mod) in set(
                    root_module_classes).union(qat_module_classes).union(fused_module_classes):
                # extra check for fused module classes to make sure they are fused module classes
                # of target modules
                if type_before_parametrizations(mod) in fused_module_classes and \
                   type_before_parametrizations(mod[0]) not in root_module_classes:  # type: ignore[index]
                    continue
                convert_weighted_module(
                    node, modules, observed_node_names, node_name_to_qconfig, backend_config,
                    is_decomposed, is_reference)
            elif type_before_parametrizations(mod) in custom_module_classes:
                convert_custom_module(
                    node, model.graph, modules, custom_module_class_mapping,
                    statically_quantized_custom_module_nodes)

    # remove deadcode after converting observers to quant/dequant ops
    model.graph.eliminate_dead_code()
    model = GraphModule(model, model.graph)

    # TODO: maybe move this to quantize_fx.py
    if not is_reference:
        model = lower_to_fbgemm(model, node_name_to_qconfig, node_name_to_scope)

    # TODO: this looks hacky, we want to check why we need this and see if we can
    # remove this
    # removes qconfig and activation_post_process modules
    if _remove_qconfig_flag:
        _remove_qconfig(model)
    model.delete_all_unused_submodules()
    model.meta.pop("_observed_graph_module_attrs", None)
    return model
|