ZTWHHH committed on
Commit
b0c1b12
·
verified ·
1 Parent(s): 73fc776

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py +23 -0
  2. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py +161 -0
  5. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py +662 -0
  6. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py +205 -0
  7. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py +160 -0
  8. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py +82 -0
  9. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py +280 -0
  10. parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py +113 -0
  11. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lstm_utils.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__init__.py +0 -0
  31. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/__init__.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/detector.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_observer.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/detector.py +1539 -0
  37. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report.py +607 -0
  38. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report_observer.py +266 -0
  39. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report_visualizer.py +667 -0
  40. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/custom_config.py +420 -0
  41. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/lstm_utils.py +183 -0
  42. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/quantize_handler.py +198 -0
  43. parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py +45 -0
  44. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py +0 -0
  45. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/qat_utils.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/duplicate_dq_pass.py +84 -0
  47. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/generate_numeric_debug_handle.py +17 -0
  48. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/port_metadata_pass.py +214 -0
  49. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/prepare.py +492 -0
  50. parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/qat_utils.py +808 -0
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Public entry points for per-backend quantization configurations.

Re-exports the core config classes from ``backend_config`` and the
``get_*_backend_config`` factory functions for each supported backend
(fbgemm, native, qnnpack, tensorrt, executorch, onednn).
"""
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, DTypeWithConstraints, ObservationType
from .fbgemm import get_fbgemm_backend_config
from .native import get_native_backend_config, get_native_backend_config_dict
from .qnnpack import get_qnnpack_backend_config
from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
from .executorch import get_executorch_backend_config
from .onednn import get_onednn_backend_config

__all__ = [
    "get_fbgemm_backend_config",
    "get_native_backend_config",
    "get_native_backend_config_dict",
    "get_qnnpack_backend_config",
    "get_tensorrt_backend_config",
    "get_tensorrt_backend_config_dict",
    "get_executorch_backend_config",
    "BackendConfig",
    "BackendPatternConfig",
    "DTypeConfig",
    "DTypeWithConstraints",
    "ObservationType",
    "get_onednn_backend_config",
]
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (870 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/__pycache__/x86.cpython-310.pyc ADDED
Binary file (2.18 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/_qnnpack_pt2e.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mypy: allow-untyped-defs
"""BackendConfig pieces for the QNNPACK PyTorch 2.0 export (pt2e) flow.

Defines the shared dtype config and per-pattern config builders used by
``get_qnnpack_pt2e_backend_config`` below.
"""
import operator
from typing import List

import torch
from torch.ao.quantization.backend_config import (
    BackendConfig,
    DTypeConfig,
    ObservationType,
    BackendPatternConfig,
)

# Shared dtype config for weighted ops (linear/conv): quint8 activations,
# qint8 weights, fp32 bias.
weighted_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)
def get_linear_configs():
    """Return the BackendPatternConfigs for decomposed linear ops.

    In the exported graph, linear is decomposed to ``addmm`` when a bias is
    present and to ``t - mm`` when it is not; one config is emitted for each.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]

    # TODO: also handle the (addmm, bias, act, (t, weight)) pattern so the
    # transposed weight is observed as a weight. The current pattern language
    # cannot express "observe this particular input of the pattern as weight",
    # so observers end up after the transpose instead of before it; this should
    # be solved by the new fusion API.
    addmm_config = (
        BackendPatternConfig(torch.ops.aten.addmm.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 2, "bias": 0})
    )
    # Bias-free linear decomposes to `t - mm`, so configure bare mm as well.
    mm_config = (
        BackendPatternConfig(torch.ops.aten.mm.default)
        .set_observation_type(observation_type)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_input_type_to_index({"weight": 1})
    )
    return [addmm_config, mm_config]
def get_conv_configs():
    """Return the BackendPatternConfigs for convolution patterns.

    Covers bare convolution, conv + relu, and (until functionalization is
    supported in PT2 mode) conv + in-place relu_.
    """
    observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]
    conv_op = torch.ops.aten.convolution.default
    patterns = [
        conv_op,
        (conv_op, torch.ops.aten.relu.default),
        # TODO: remove when functionalization is supported in PT2 mode
        (conv_op, torch.ops.aten.relu_.default),
    ]
    conv_configs = []
    for pattern in patterns:
        conv_configs.append(
            BackendPatternConfig(pattern)
            .set_observation_type(observation_type)  # noqa: E131
            .set_dtype_configs(dtype_configs)
            ._set_input_type_to_index({"weight": 1, "bias": 2})
        )
    return conv_configs
def get_pooling_configs():
    """Return the BackendPatternConfig for max pooling.

    ``max_pool2d_with_indices`` returns a (values, indices) pair that is
    consumed through ``operator.getitem``, so the pattern is expressed in the
    complex (reversed nested tuple) format with the pooling node as root.
    The output shares its observer with the input.
    """
    configs = []
    share_observer = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
    dtype_configs = [weighted_op_quint8_dtype_config]

    def _maxpool_root(node_pattern):
        # node_pattern is (getitem, maxpool, index); the maxpool node is the root.
        _, maxpool, _ = node_pattern
        return maxpool

    pooling_config = (
        BackendPatternConfig()
        ._set_pattern_complex_format((operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0))
        .set_observation_type(share_observer)  # noqa: E131
        .set_dtype_configs(dtype_configs)
        ._set_root_node_getter(_maxpool_root)
    )
    configs.append(pooling_config)

    return configs
def get_relu_configs():
    """Return the BackendPatternConfig for standalone relu.

    Relu shares its output observer with its input.
    """
    share_observer = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
    relu_config = (
        BackendPatternConfig(torch.ops.aten.relu.default)
        .set_observation_type(share_observer)  # noqa: E131
        .set_dtype_configs([weighted_op_quint8_dtype_config])
    )
    return [relu_config]
def get_binary_op_configs():
    """Return the BackendPatternConfigs for binary ops (add and add_).

    The observation type is selected per-pattern by the number of tensor
    arguments at match time, via the mapping below.
    """
    binary_op_configs: List[BackendPatternConfig] = []
    dtype_configs = [weighted_op_quint8_dtype_config]
    # TODO: this mapping is not used right now since we have an extra check in
    # prepare; change 1 to NO_OBSERVER later, once Tensor dtype inference is
    # implemented properly.
    num_tensor_args_to_observation_type_mapping = {
        0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
        1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
        2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
    }
    for bop in (torch.ops.aten.add.Tensor, torch.ops.aten.add_.Tensor):
        patterns = [
            (bop, torch.ops.aten.relu.default),
            bop,
            # TODO: remove when functionalization is supported in pt2_mode
            (bop, torch.ops.aten.relu_.default),
        ]
        for pattern in patterns:
            config = (
                BackendPatternConfig(pattern)
                .set_dtype_configs(dtype_configs)  # noqa: E131
                ._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping)
            )
            binary_op_configs.append(config)

    return binary_op_configs
def get_qnnpack_pt2e_backend_config():
    """Assemble the full QNNPACK pt2e BackendConfig from the per-op builders."""
    backend_config = BackendConfig("qnnpack_pytorch_2.0_export")
    # Each setter merges a group of pattern configs; later registrations for
    # the same pattern would override earlier ones.
    backend_config.set_backend_pattern_configs(get_linear_configs())
    backend_config.set_backend_pattern_configs(get_binary_op_configs())
    backend_config.set_backend_pattern_configs(get_conv_configs())
    backend_config.set_backend_pattern_configs(get_pooling_configs())
    backend_config.set_backend_pattern_configs(get_relu_configs())
    return backend_config
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/backend_config.py ADDED
@@ -0,0 +1,662 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# mypy: allow-untyped-defs
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Type, Union, TYPE_CHECKING

import torch
from enum import Enum

if TYPE_CHECKING:
    # Imported only for annotations, to avoid a runtime import cycle.
    from torch.ao.quantization.utils import Pattern


__all__ = [
    "BackendConfig",
    "BackendPatternConfig",
    "DTypeConfig",
    "DTypeWithConstraints",
    "ObservationType",
]


# DTypeConfig dict keys
# (keys read/written by DTypeConfig.from_dict()/to_dict())
INPUT_DTYPE_DICT_KEY = "input_dtype"
OUTPUT_DTYPE_DICT_KEY = "output_dtype"
WEIGHT_DTYPE_DICT_KEY = "weight_dtype"
BIAS_DTYPE_DICT_KEY = "bias_dtype"
IS_DYNAMIC_DICT_KEY = "is_dynamic"

# BackendConfig dict keys
# (keys read/written by BackendConfig.from_dict()/to_dict())
NAME_DICT_KEY = "name"
CONFIGS_DICT_KEY = "configs"

# BackendPatternConfig dict keys
# (presumably used by BackendPatternConfig's dict conversion — defined later
# in this file)
PATTERN_DICT_KEY = "pattern"
PATTERN_COMPLEX_FORMAT_DICT_KEY = "pattern_complex_format"
OBSERVATION_TYPE_DICT_KEY = "observation_type"
DTYPE_CONFIGS_DICT_KEY = "dtype_configs"
ROOT_MODULE_DICT_KEY = "root_module"
QAT_MODULE_DICT_KEY = "qat_module"
REFERENCE_QUANTIZED_MODULE_DICT_KEY = "reference_quantized_module_for_root"
FUSED_MODULE_DICT_KEY = "fused_module"
FUSER_METHOD_DICT_KEY = "fuser_method"
ROOT_NODE_GETTER_DICT_KEY = "root_node_getter"
EXTRA_INPUTS_GETTER_DICT_KEY = "extra_inputs_getter"
NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY = "num_tensor_args_to_observation_type"
INPUT_TYPE_TO_INDEX_DICT_KEY = "input_type_to_index"
# TODO: maybe rename this to something that's not related to observer
# e.g. QParamsType
class ObservationType(Enum):
    """An enum that represents the different ways an operator/operator pattern
    should be observed.
    """

    OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
    """this means input and output are observed with different observers, based
    on qconfig.activation
    example: conv, linear, softmax
    """

    OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1
    """this means the output will use the same observer instance as input, based
    on qconfig.activation
    example: torch.cat, maxpool
    """

    INPUT_OUTPUT_NOT_OBSERVED = 2
    """this means the input and output are never observed
    example: x.shape, x.size
    """
@dataclass
class DTypeWithConstraints:
    """
    Config for specifying additional constraints for a given dtype, such as quantization
    value ranges, scale value ranges, and fixed quantization params, to be used in
    :class:`~torch.ao.quantization.backend_config.DTypeConfig`.

    The constraints currently supported are:

    * `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper
      bounds for the minimum and maximum quantized values respectively. If
      the QConfig's `quant_min` and `quant_max` fall outside this range,
      then the QConfig will be ignored.

    * `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper
      bounds for the minimum and maximum scale values respectively. If the
      QConfig's minimum scale value (currently exposed as `eps`) falls below
      the lower bound, then the QConfig will be ignored. Note that the upper
      bound is currently not enforced.

    * `scale_exact_match` and `zero_point_exact_match`: Exact match requirements
      for scale and zero point, to be used for operators with fixed quantization
      parameters such as sigmoid and tanh. If the observer specified in the QConfig
      is neither `FixedQParamsObserver` nor `FixedQParamsFakeQuantize`, or if
      the quantization parameters don't match, then the QConfig will be ignored.
    """
    # The dtype being constrained; None means no dtype was specified.
    dtype: Optional[torch.dtype] = None
    # Bounds on the allowed quantized value range (see class docstring).
    quant_min_lower_bound: Union[int, float, None] = None
    quant_max_upper_bound: Union[int, float, None] = None
    # Bounds on the allowed scale range (upper bound currently not enforced).
    scale_min_lower_bound: Union[int, float, None] = None
    scale_max_upper_bound: Union[int, float, None] = None
    # Exact qparams for fixed-qparams ops such as sigmoid and tanh.
    scale_exact_match: Optional[float] = None
    zero_point_exact_match: Optional[int] = None
@dataclass
class DTypeConfig:
    """
    Config object that specifies the supported data types passed as arguments to
    quantize ops in the reference model spec, for input and output activations,
    weights, and biases.

    For example, consider the following reference model:

      quant1 - [dequant1 - fp32_linear - quant2] - dequant2

    The pattern in the square brackets refers to the reference pattern of
    statically quantized linear. Setting the input dtype as `torch.quint8`
    in the DTypeConfig means we pass in `torch.quint8` as the dtype argument
    to the first quantize op (quant1). Similarly, setting the output dtype as
    `torch.quint8` means we pass in `torch.quint8` as the dtype argument to
    the second quantize op (quant2).

    Note that the dtype here does not refer to the interface dtypes of the
    op. For example, the "input dtype" here is not the dtype of the input
    tensor passed to the quantized linear op. Though it can still be the
    same as the interface dtype, this is not always the case, e.g. the
    interface dtype is fp32 in dynamic quantization but the "input dtype"
    specified in the DTypeConfig would still be quint8. The semantics of
    dtypes here are the same as the semantics of the dtypes specified in
    the observers.

    These dtypes are matched against the ones specified in the user's
    QConfig. If there is a match, and the QConfig satisfies the constraints
    specified in the DTypeConfig (if any), then we will quantize the given
    pattern using this DTypeConfig. Otherwise, the QConfig is ignored and
    the pattern will not be quantized.

    Example usage::

        >>> # xdoctest: +SKIP(failing)
        >>> dtype_config1 = DTypeConfig(
        ...     input_dtype=torch.quint8,
        ...     output_dtype=torch.quint8,
        ...     weight_dtype=torch.qint8,
        ...     bias_dtype=torch.float)

        >>> dtype_config2 = DTypeConfig(
        ...     input_dtype=DTypeWithConstraints(
        ...         dtype=torch.quint8,
        ...         quant_min_lower_bound=0,
        ...         quant_max_upper_bound=255,
        ...     ),
        ...     output_dtype=DTypeWithConstraints(
        ...         dtype=torch.quint8,
        ...         quant_min_lower_bound=0,
        ...         quant_max_upper_bound=255,
        ...     ),
        ...     weight_dtype=DTypeWithConstraints(
        ...         dtype=torch.qint8,
        ...         quant_min_lower_bound=-128,
        ...         quant_max_upper_bound=127,
        ...     ),
        ...     bias_dtype=torch.float)

        >>> dtype_config1.input_dtype
        torch.quint8

        >>> dtype_config2.input_dtype
        torch.quint8

        >>> dtype_config2.input_dtype_with_constraints
        DTypeWithConstraints(dtype=torch.quint8, quant_min_lower_bound=0, quant_max_upper_bound=255, \
scale_min_lower_bound=None, scale_max_upper_bound=None)
    """
    input_dtype_with_constraints: DTypeWithConstraints
    output_dtype_with_constraints: DTypeWithConstraints
    weight_dtype_with_constraints: DTypeWithConstraints
    bias_dtype: Optional[torch.dtype]
    is_dynamic: Optional[bool]

    def __init__(
        self,
        input_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        output_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        weight_dtype: Union[torch.dtype, DTypeWithConstraints, None] = None,
        bias_dtype: Optional[torch.dtype] = None,
        is_dynamic: Optional[bool] = None,
    ):
        # Normalize each dtype argument to a DTypeWithConstraints wrapper so
        # downstream code can uniformly read `.dtype` plus optional constraints.
        if isinstance(input_dtype, DTypeWithConstraints):
            self.input_dtype_with_constraints = input_dtype
        else:
            self.input_dtype_with_constraints = DTypeWithConstraints(dtype=input_dtype)

        if isinstance(output_dtype, DTypeWithConstraints):
            self.output_dtype_with_constraints = output_dtype
        else:
            self.output_dtype_with_constraints = DTypeWithConstraints(dtype=output_dtype)

        if isinstance(weight_dtype, DTypeWithConstraints):
            self.weight_dtype_with_constraints = weight_dtype
        else:
            self.weight_dtype_with_constraints = DTypeWithConstraints(dtype=weight_dtype)

        self.bias_dtype = bias_dtype
        self.is_dynamic = is_dynamic

    @property
    def input_dtype(self) -> Optional[torch.dtype]:
        """The plain input dtype, without its constraints."""
        return self.input_dtype_with_constraints.dtype

    @property
    def output_dtype(self) -> Optional[torch.dtype]:
        """The plain output dtype, without its constraints."""
        return self.output_dtype_with_constraints.dtype

    @property
    def weight_dtype(self) -> Optional[torch.dtype]:
        """The plain weight dtype, without its constraints."""
        return self.weight_dtype_with_constraints.dtype

    @classmethod
    def from_dict(cls, dtype_config_dict: Dict[str, Any]) -> DTypeConfig:
        """
        Create a ``DTypeConfig`` from a dictionary with the following items (all optional):
            "input_dtype": torch.dtype or ``DTypeWithConstraints``
            "output_dtype": torch.dtype or ``DTypeWithConstraints``
            "weight_dtype": torch.dtype or ``DTypeWithConstraints``
            "bias_dtype": torch.dtype
            "is_dynamic": bool
        """
        input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None)
        if input_dtype is not None and not isinstance(input_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected input_dtype to be a torch.dtype or DTypeWithConstraints")
        output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None)
        if output_dtype is not None and not isinstance(output_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected output_dtype to be a torch.dtype or DTypeWithConstraints")
        weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None)
        if weight_dtype is not None and not isinstance(weight_dtype, (torch.dtype, DTypeWithConstraints)):
            raise ValueError("Expected weight_dtype to be a torch.dtype or DTypeWithConstraints")
        # Note: bias_dtype and is_dynamic are not type-checked here.
        bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None)
        is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None)
        return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic)

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``DTypeConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.backend_config.DTypeConfig.from_dict`.
        """
        # Only keys whose values were set are emitted, so from_dict(to_dict(x))
        # round-trips without introducing explicit Nones.
        dtype_config_dict: Dict[str, Any] = {}
        if self.input_dtype is not None:
            dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype_with_constraints
        if self.output_dtype is not None:
            dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype_with_constraints
        if self.weight_dtype is not None:
            dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype_with_constraints
        if self.bias_dtype is not None:
            dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype
        if self.is_dynamic is not None:
            dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic
        return dtype_config_dict
class BackendConfig:
    # TODO: refer to NativeBackendConfig once that is implemented
    """Config that defines the set of patterns that can be quantized on a given backend, and how reference
    quantized models can be produced from these patterns.

    A pattern in this context refers to a module, a functional, an operator, or a directed acyclic graph
    of the above. Each pattern supported on the target backend can be individually configured through
    :class:`~torch.ao.quantization.backend_config.BackendPatternConfig` in terms of:

    (1) The supported input/output activation, weight, and bias data types

    (2) How observers and quant/dequant ops are inserted in order to construct the reference pattern, and

    (3) (Optionally) Fusion, QAT, and reference module mappings.

    The format of the patterns is described in:
    https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md

    Example usage::

        import torch
        from torch.ao.quantization.backend_config import (
            BackendConfig,
            BackendPatternConfig,
            DTypeConfig,
            ObservationType,
        )

        weighted_int8_dtype_config = DTypeConfig(
            input_dtype=torch.quint8,
            output_dtype=torch.quint8,
            weight_dtype=torch.qint8,
            bias_dtype=torch.float)

        def fuse_conv2d_relu(is_qat, conv, relu):
            return torch.ao.nn.intrinsic.ConvReLU2d(conv, relu)

        # For quantizing Linear
        linear_config = BackendPatternConfig(torch.nn.Linear) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_root_module(torch.nn.Linear) \
            .set_qat_module(torch.ao.nn.qat.Linear) \
            .set_reference_quantized_module(torch.ao.nn.quantized.reference.Linear)

        # For fusing Conv2d + ReLU into ConvReLU2d
        conv_relu_config = BackendPatternConfig((torch.nn.Conv2d, torch.nn.ReLU)) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_fused_module(torch.ao.nn.intrinsic.ConvReLU2d) \
            .set_fuser_method(fuse_conv2d_relu)

        # For quantizing ConvReLU2d
        fused_conv_relu_config = BackendPatternConfig(torch.ao.nn.intrinsic.ConvReLU2d) \
            .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
            .add_dtype_config(weighted_int8_dtype_config) \
            .set_root_module(torch.nn.Conv2d) \
            .set_qat_module(torch.ao.nn.intrinsic.qat.ConvReLU2d) \
            .set_reference_quantized_module(torch.ao.nn.quantized.reference.Conv2d)

        backend_config = BackendConfig("my_backend") \
            .set_backend_pattern_config(linear_config) \
            .set_backend_pattern_config(conv_relu_config) \
            .set_backend_pattern_config(fused_conv_relu_config)

    """
    def __init__(self, name: str = ""):
        self.name = name
        # Store all BackendPatternConfigs in a map to handle duplicates
        # Note: the key in this map uses the complex reversed tuple format.
        # This is intended only for internal use; users who wish to access
        # the original patterns should go through `self.configs` instead.
        self._pattern_complex_format_to_config: Dict[Pattern, BackendPatternConfig] = {}

    def __repr__(self):
        return f"BackendConfig({self.__dict__})"

    def set_name(self, name: str) -> BackendConfig:
        """
        Set the name of the target backend.
        """
        self.name = name
        return self

    def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig:
        """
        Set the config for a pattern that can be run on the target backend.
        This overrides any existing config for the given pattern.
        """
        # Avoid circular dependencies
        pattern_complex_format = torch.ao.quantization.backend_config.utils \
            ._get_pattern_in_reversed_nested_tuple_format(config)  # type: ignore[attr-defined]
        self._pattern_complex_format_to_config[pattern_complex_format] = config
        return self

    def set_backend_pattern_configs(self, configs: List[BackendPatternConfig]) -> BackendConfig:
        """
        Set the configs for patterns that can be run on the target backend.
        This overrides any existing config for a given pattern if it was previously registered already.
        """
        for conf in configs:
            self.set_backend_pattern_config(conf)
        return self

    @property
    def configs(self) -> List[BackendPatternConfig]:
        """
        Return a copy of the list of configs set in this `BackendConfig`.
        """
        return list(self._pattern_complex_format_to_config.values())

    @classmethod
    def from_dict(cls, backend_config_dict: Dict[str, Any]) -> BackendConfig:
        """
        Create a ``BackendConfig`` from a dictionary with the following items:

            "name": the name of the target backend

            "configs": a list of dictionaries that each represents a `BackendPatternConfig`

        """
        conf = cls(backend_config_dict.get(NAME_DICT_KEY, ""))
        for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):
            # Accept either already-constructed configs or their dict form.
            if isinstance(d, BackendPatternConfig):
                conf.set_backend_pattern_config(d)
            elif isinstance(d, Dict):
                conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))
            else:
                raise ValueError(f"Expected backend_config_dict['{CONFIGS_DICT_KEY}'] to be a dictionary")
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``BackendConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.backend_config.BackendConfig.from_dict`.
        """
        return {
            NAME_DICT_KEY: self.name,
            CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs],
        }
+ class BackendPatternConfig:
408
+ """
409
+ Config object that specifies quantization behavior for a given operator pattern.
410
+ For a detailed example usage, see :class:`~torch.ao.quantization.backend_config.BackendConfig`.
411
+ """
412
+ def __init__(self, pattern: Optional[Pattern] = None):
413
+ self.pattern: Optional[Pattern] = pattern
414
+ self.observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
415
+ self.dtype_configs: List[DTypeConfig] = []
416
+ self.root_module: Optional[Type[torch.nn.Module]] = None
417
+ self.qat_module: Optional[Type[torch.nn.Module]] = None
418
+ self.reference_quantized_module: Optional[Type[torch.nn.Module]] = None
419
+ self.fused_module: Optional[Type[torch.nn.Module]] = None
420
+ self.fuser_method: Optional[Callable] = None
421
+
422
+ # Temporary/internal configs
423
+ self._root_node_getter: Optional[Callable] = None
424
+ self._extra_inputs_getter: Optional[Callable] = None
425
+ self._num_tensor_args_to_observation_type: Dict[int, ObservationType] = {}
426
+ self._input_type_to_index: Dict[str, int] = {}
427
+ self._pattern_complex_format: Optional[Pattern] = None
428
+
429
+ def __repr__(self):
430
+ dict_nonempty = {
431
+ k: v for k, v in self.__dict__.items()
432
+ if (
433
+ (not isinstance(v, (list, dict)) and v is not None)
434
+ or (isinstance(v, (list, dict)) and len(v) > 0)
435
+ )
436
+ }
437
+ return f"BackendPatternConfig({dict_nonempty})"
438
+
439
+ def set_pattern(self, pattern: Pattern) -> BackendPatternConfig:
440
+ """
441
+ Set the pattern to configure.
442
+
443
+ The pattern can be a float module, functional operator, pytorch operator, or a tuple
444
+ combination of the above. Tuple patterns are treated as sequential patterns, and
445
+ currently only tuples of 2 or 3 elements are supported.
446
+ """
447
+ if self._pattern_complex_format is not None:
448
+ raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
449
+ self.pattern = pattern
450
+ return self
451
+
452
+ def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig:
453
+ """
454
+ Set how observers should be inserted in the graph for this pattern.
455
+
456
+ Observation type here refers to how observers (or quant-dequant ops) will be placed
457
+ in the graph. This is used to produce the desired reference patterns understood by
458
+ the backend. Weighted ops such as linear and conv require different observers
459
+ (or quantization parameters passed to quantize ops in the reference model) for the
460
+ input and the output.
461
+
462
+ There are two observation types:
463
+
464
+ `OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT` (default): the output observer instance
465
+ will be different from the input. This is the most common observation type.
466
+
467
+ `OUTPUT_SHARE_OBSERVER_WITH_INPUT`: the output observer instance will be the
468
+ same as the input. This is useful for operators like `cat`.
469
+
470
+ Note: This will be renamed in the near future, since we will soon insert QuantDeQuantStubs
471
+ with observers (and fake quantizes) attached instead of observers themselves.
472
+ """
473
+ self.observation_type = observation_type
474
+ return self
475
+
476
+ def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig:
477
+ """
478
+ Add a set of supported data types passed as arguments to quantize ops in the
479
+ reference model spec.
480
+ """
481
+ self.dtype_configs.append(dtype_config)
482
+ return self
483
+
484
+ def set_dtype_configs(self, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
485
+ """
486
+ Set the supported data types passed as arguments to quantize ops in the
487
+ reference model spec, overriding all previously registered data types.
488
+ """
489
+ self.dtype_configs = dtype_configs
490
+ return self
491
+
492
+ def set_root_module(self, root_module: Type[torch.nn.Module]) -> BackendPatternConfig:
493
+ """
494
+ Set the module that represents the root for this pattern.
495
+
496
+ When we construct the reference quantized model during the convert phase,
497
+ the root modules (e.g. torch.nn.Linear for torch.ao.nn.intrinsic.LinearReLU)
498
+ will be swapped to the corresponding reference quantized modules (e.g.
499
+ torch.ao.nn.reference.quantized.Linear). This allows custom backends to
500
+ specify custom reference quantized module implementations to match the
501
+ numerics of their lowered operators. Since this is a one-to-one mapping,
502
+ both the root module and the reference quantized module must be specified
503
+ in the same BackendPatternConfig in order for the conversion to take place.
504
+ """
505
+ self.root_module = root_module
506
+ return self
507
+
508
+ def set_qat_module(self, qat_module: Type[torch.nn.Module]) -> BackendPatternConfig:
509
+ """
510
+ Set the module that represents the QAT implementation for this pattern.
511
+ """
512
+ self.qat_module = qat_module
513
+ return self
514
+
515
+ def set_reference_quantized_module(self, reference_quantized_module: Type[torch.nn.Module]) -> BackendPatternConfig:
516
+ """
517
+ Set the module that represents the reference quantized implementation for
518
+ this pattern's root module.
519
+
520
+ For more detail, see :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.set_root_module`.
521
+ """
522
+ self.reference_quantized_module = reference_quantized_module
523
+ return self
524
+
525
+ def set_fused_module(self, fused_module: Type[torch.nn.Module]) -> BackendPatternConfig:
526
+ """
527
+ Set the module that represents the fused implementation for this pattern.
528
+ """
529
+ self.fused_module = fused_module
530
+ return self
531
+
532
+ def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig:
533
+ """
534
+ Set the function that specifies how to fuse this BackendPatternConfig's pattern.
535
+
536
+ The first argument of this function should be `is_qat`, and the rest of the arguments
537
+ should be the items in the tuple pattern. The return value of this function should be
538
+ the resulting fused module.
539
+
540
+ For example, the fuser method for the pattern `(torch.nn.Linear, torch.nn.ReLU)` can be:
541
+
542
+ def fuse_linear_relu(is_qat, linear, relu):
543
+ return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
544
+
545
+ For a more complicated example, see https://gist.github.com/jerryzh168/8bea7180a8ba3c279f2c9b050f2a69a6.
546
+ """
547
+ self.fuser_method = fuser_method
548
+ return self
549
+
550
+ def _set_root_node_getter(self, root_node_getter: Callable) -> BackendPatternConfig:
551
+ self._root_node_getter = root_node_getter
552
+ return self
553
+
554
+ def _set_extra_inputs_getter(self, extra_inputs_getter: Callable) -> BackendPatternConfig:
555
+ self._extra_inputs_getter = extra_inputs_getter
556
+ return self
557
+
558
+ def _set_num_tensor_args_to_observation_type(
559
+ self, num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> BackendPatternConfig:
560
+ self._num_tensor_args_to_observation_type = num_tensor_args_to_observation_type
561
+ return self
562
+
563
+ def _set_input_type_to_index(self, input_type_to_index: Dict[str, int]) -> BackendPatternConfig:
564
+ self._input_type_to_index = input_type_to_index
565
+ return self
566
+
567
+ def _set_pattern_complex_format(self, pattern: Pattern) -> BackendPatternConfig:
568
+ """
569
+ Set the pattern to configure, using the reversed nested tuple format.
570
+
571
+ See the BackendConfig README for more detail:
572
+ https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md#advanced-pattern-specification
573
+ """
574
+ if self.pattern is not None:
575
+ raise ValueError("Only one of 'pattern' or 'pattern_complex_format' can be set")
576
+ self._pattern_complex_format = pattern
577
+ return self
578
+
579
+ @classmethod
580
+ def from_dict(cls, backend_pattern_config_dict: Dict[str, Any]) -> BackendPatternConfig:
581
+ """
582
+ Create a ``BackendPatternConfig`` from a dictionary with the following items:
583
+
584
+ "pattern": the pattern being configured
585
+ "observation_type": the :class:`~torch.ao.quantization.backend_config.ObservationType` that specifies how
586
+ observers should be inserted for this pattern
587
+ "dtype_configs": a list of dictionaries that represents :class:`~torch.ao.quantization.backend_config.DTypeConfig` s
588
+ "root_module": a :class:`torch.nn.Module` that represents the root for this pattern
589
+ "qat_module": a :class:`torch.nn.Module` that represents the QAT implementation for this pattern
590
+ "reference_quantized_module": a :class:`torch.nn.Module` that represents the reference quantized
591
+ implementation for this pattern's root module.
592
+ "fused_module": a :class:`torch.nn.Module` that represents the fused implementation for this pattern
593
+ "fuser_method": a function that specifies how to fuse the pattern for this pattern
594
+ "pattern_complex_format": the pattern specified in the reversed nested tuple format (deprecated)
595
+
596
+ """
597
+ def _get_dtype_config(obj: Any) -> DTypeConfig:
598
+ """
599
+ Convert the given object into a ``DTypeConfig`` if possible, else throw an exception.
600
+ """
601
+ if isinstance(obj, DTypeConfig):
602
+ return obj
603
+ if isinstance(obj, Dict):
604
+ return DTypeConfig.from_dict(obj)
605
+ raise ValueError(
606
+ f"Expected a list of DTypeConfigs in "
607
+ f"backend_pattern_config_dict[\"{DTYPE_CONFIGS_DICT_KEY}\"], got '{type(obj)}'"
608
+ )
609
+
610
+ conf = cls()
611
+ if PATTERN_DICT_KEY in backend_pattern_config_dict:
612
+ conf.set_pattern(backend_pattern_config_dict[PATTERN_DICT_KEY])
613
+ if OBSERVATION_TYPE_DICT_KEY in backend_pattern_config_dict:
614
+ conf.set_observation_type(backend_pattern_config_dict[OBSERVATION_TYPE_DICT_KEY])
615
+ for d in backend_pattern_config_dict.get(DTYPE_CONFIGS_DICT_KEY, []):
616
+ conf.add_dtype_config(_get_dtype_config(d))
617
+ conf.set_root_module(backend_pattern_config_dict.get(ROOT_MODULE_DICT_KEY, None))
618
+ conf.set_qat_module(backend_pattern_config_dict.get(QAT_MODULE_DICT_KEY, None))
619
+ conf.set_reference_quantized_module(backend_pattern_config_dict.get(REFERENCE_QUANTIZED_MODULE_DICT_KEY, None))
620
+ conf.set_fused_module(backend_pattern_config_dict.get(FUSED_MODULE_DICT_KEY, None))
621
+ conf.set_fuser_method(backend_pattern_config_dict.get(FUSER_METHOD_DICT_KEY, None))
622
+ conf._set_root_node_getter(backend_pattern_config_dict.get(ROOT_NODE_GETTER_DICT_KEY, None))
623
+ conf._set_extra_inputs_getter(backend_pattern_config_dict.get(EXTRA_INPUTS_GETTER_DICT_KEY, None))
624
+ conf._set_num_tensor_args_to_observation_type(
625
+ backend_pattern_config_dict.get(NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY, {}))
626
+ conf._set_input_type_to_index(backend_pattern_config_dict.get(INPUT_TYPE_TO_INDEX_DICT_KEY, {}))
627
+ if PATTERN_COMPLEX_FORMAT_DICT_KEY in backend_pattern_config_dict:
628
+ conf._set_pattern_complex_format(backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY])
629
+ return conf
630
+
631
+ def to_dict(self) -> Dict[str, Any]:
632
+ """
633
+ Convert this ``BackendPatternConfig`` to a dictionary with the items described in
634
+ :func:`~torch.ao.quantization.backend_config.BackendPatternConfig.from_dict`.
635
+ """
636
+ backend_pattern_config_dict: Dict[str, Any] = {
637
+ OBSERVATION_TYPE_DICT_KEY: self.observation_type,
638
+ DTYPE_CONFIGS_DICT_KEY: [c.to_dict() for c in self.dtype_configs],
639
+ }
640
+ if self.pattern is not None:
641
+ backend_pattern_config_dict[PATTERN_DICT_KEY] = self.pattern
642
+ if self.root_module is not None:
643
+ backend_pattern_config_dict[ROOT_MODULE_DICT_KEY] = self.root_module
644
+ if self.qat_module is not None:
645
+ backend_pattern_config_dict[QAT_MODULE_DICT_KEY] = self.qat_module
646
+ if self.reference_quantized_module is not None:
647
+ backend_pattern_config_dict[REFERENCE_QUANTIZED_MODULE_DICT_KEY] = self.reference_quantized_module
648
+ if self.fused_module is not None:
649
+ backend_pattern_config_dict[FUSED_MODULE_DICT_KEY] = self.fused_module
650
+ if self.fuser_method is not None:
651
+ backend_pattern_config_dict[FUSER_METHOD_DICT_KEY] = self.fuser_method
652
+ if self._root_node_getter is not None:
653
+ backend_pattern_config_dict[ROOT_NODE_GETTER_DICT_KEY] = self._root_node_getter
654
+ if self._extra_inputs_getter is not None:
655
+ backend_pattern_config_dict[EXTRA_INPUTS_GETTER_DICT_KEY] = self._extra_inputs_getter
656
+ if len(self._num_tensor_args_to_observation_type) > 0:
657
+ backend_pattern_config_dict[NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY] = self._num_tensor_args_to_observation_type
658
+ if len(self._input_type_to_index) > 0:
659
+ backend_pattern_config_dict[INPUT_TYPE_TO_INDEX_DICT_KEY] = self._input_type_to_index
660
+ if self._pattern_complex_format is not None:
661
+ backend_pattern_config_dict[PATTERN_COMPLEX_FORMAT_DICT_KEY] = self._pattern_complex_format
662
+ return backend_pattern_config_dict
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/native.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from ._common_operator_config_utils import (
4
+ _get_binary_op_configs,
5
+ _get_bn_configs,
6
+ _get_cat_config,
7
+ _get_conv_configs,
8
+ _get_default_op_configs,
9
+ _get_embedding_op_configs,
10
+ _get_fixed_qparams_op_configs,
11
+ _get_linear_configs,
12
+ _get_ln_configs,
13
+ _get_rnn_op_configs,
14
+ _get_share_qparams_op_configs,
15
+ _get_tensor_info_op_configs,
16
+ )
17
+ from .backend_config import BackendConfig, DTypeConfig
18
+
19
+ __all__ = [
20
+ "get_test_only_legacy_native_backend_config",
21
+ "default_op_quint8_dtype_config",
22
+ "default_op_fp16_dtype_config",
23
+ "default_dynamic_int8_dtype_config",
24
+ "default_dynamic_float16_dtype_config",
25
+ "input_output_only_quint8_dtype_config",
26
+ "weight_only_quint8_dtype_config",
27
+ "weight_only_quint4x2_dtype_config",
28
+ "get_native_backend_config",
29
+ "get_native_backend_config_dict",
30
+ "get_test_only_legacy_native_backend_config_dict",
31
+ ]
32
+
33
+ # ===================
34
+ # | DTYPE CONFIGS |
35
+ # ===================
36
+
37
+ # weighted op int8 dtype config
38
+ # this is config for ops that has quantized weights, like linear, conv
39
+ weighted_op_quint8_dtype_config = DTypeConfig(
40
+ input_dtype=torch.quint8,
41
+ output_dtype=torch.quint8,
42
+ weight_dtype=torch.qint8,
43
+ bias_dtype=torch.float,
44
+ )
45
+
46
+ default_op_quint8_dtype_config = DTypeConfig(
47
+ input_dtype=torch.quint8,
48
+ output_dtype=torch.quint8,
49
+ )
50
+
51
+ default_op_fp16_dtype_config = DTypeConfig(
52
+ input_dtype=torch.float16,
53
+ output_dtype=torch.float16,
54
+ weight_dtype=torch.float16,
55
+ bias_dtype=torch.float16,
56
+ )
57
+
58
+ default_dynamic_int8_dtype_config = DTypeConfig(
59
+ input_dtype=torch.quint8,
60
+ output_dtype=torch.float,
61
+ weight_dtype=torch.qint8,
62
+ bias_dtype=torch.float,
63
+ # currently the dtype check is not yet enabled, so we provided the dtype_configs but
64
+ # it is not really used yet,
65
+ # we will enable it a bit later after we moved everything to backend_config_dict
66
+ is_dynamic=True,
67
+ )
68
+
69
+ default_dynamic_float16_dtype_config = DTypeConfig(
70
+ input_dtype=torch.float16,
71
+ output_dtype=torch.float,
72
+ weight_dtype=torch.float16,
73
+ bias_dtype=torch.float,
74
+ # currently the dtype check is not yet enabled, so we provided the dtype_configs but
75
+ # it is not really used yet,
76
+ # we will enable it a bit later after we moved everything to backend_config_dict
77
+ is_dynamic=True,
78
+ )
79
+
80
+ # Needed for LayerNorm and f.layer_norm, since currently the kernel only supports float weights
81
+ input_output_only_quint8_dtype_config = DTypeConfig(
82
+ input_dtype=torch.quint8,
83
+ output_dtype=torch.quint8,
84
+ weight_dtype=torch.float,
85
+ bias_dtype=torch.float,
86
+ )
87
+
88
+ weight_only_quint8_dtype_config = DTypeConfig(
89
+ input_dtype=torch.float,
90
+ output_dtype=torch.float,
91
+ weight_dtype=torch.quint8,
92
+ )
93
+
94
+ weight_only_quint4x2_dtype_config = DTypeConfig(
95
+ input_dtype=torch.float,
96
+ output_dtype=torch.float,
97
+ weight_dtype=torch.quint4x2,
98
+ )
99
+
100
+
101
+ # =====================
102
+ # | BACKEND CONFIGS |
103
+ # =====================
104
+
105
+ def get_test_only_legacy_native_backend_config() -> BackendConfig:
106
+ """
107
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops.
108
+ """
109
+ conv_dtype_configs = [weighted_op_quint8_dtype_config]
110
+ linear_dtype_configs = [
111
+ weighted_op_quint8_dtype_config,
112
+ default_dynamic_int8_dtype_config,
113
+ default_dynamic_float16_dtype_config,
114
+ default_op_fp16_dtype_config,
115
+ ]
116
+ binary_op_dtype_configs = [
117
+ default_op_quint8_dtype_config,
118
+ default_op_fp16_dtype_config,
119
+ ]
120
+ default_op_dtype_configs = [default_op_quint8_dtype_config]
121
+ fixed_qparams_op_dtype_configs = [
122
+ default_op_quint8_dtype_config,
123
+ default_op_fp16_dtype_config,
124
+ ]
125
+ share_qparams_op_dtype_configs = [
126
+ default_op_quint8_dtype_config,
127
+ default_op_fp16_dtype_config
128
+ ]
129
+ tensor_info_op_dtype_configs = [
130
+ default_op_quint8_dtype_config,
131
+ ]
132
+ rnn_op_dtype_configs = [
133
+ default_dynamic_int8_dtype_config,
134
+ default_dynamic_float16_dtype_config,
135
+ ]
136
+ embedding_op_dtype_configs = [
137
+ weight_only_quint8_dtype_config,
138
+ weight_only_quint4x2_dtype_config,
139
+ ]
140
+ layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
141
+ return BackendConfig("_native_and_fp16") \
142
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
143
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
144
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
145
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
146
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
147
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
148
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
149
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
150
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
151
+ .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
152
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
153
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
154
+
155
+ def get_native_backend_config() -> BackendConfig:
156
+ """
157
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack).
158
+ """
159
+ # TODO: express this BackendConfig as a union of the FBGEMM and QNNPACK BackendConfigs
160
+ conv_dtype_configs = [weighted_op_quint8_dtype_config]
161
+ linear_dtype_configs = [
162
+ weighted_op_quint8_dtype_config,
163
+ default_dynamic_int8_dtype_config,
164
+ default_dynamic_float16_dtype_config,
165
+ ]
166
+ binary_op_dtype_configs = [default_op_quint8_dtype_config]
167
+ default_op_dtype_configs = [default_op_quint8_dtype_config]
168
+ fixed_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
169
+ share_qparams_op_dtype_configs = [default_op_quint8_dtype_config]
170
+ tensor_info_op_dtype_configs = [default_op_quint8_dtype_config]
171
+ rnn_op_dtype_configs = [
172
+ default_dynamic_int8_dtype_config,
173
+ default_dynamic_float16_dtype_config,
174
+ ]
175
+ embedding_op_dtype_configs = [
176
+ weight_only_quint8_dtype_config,
177
+ weight_only_quint4x2_dtype_config,
178
+ ]
179
+ layer_norm_op_dtype_configs = [input_output_only_quint8_dtype_config]
180
+ return BackendConfig("native") \
181
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
182
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
183
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
184
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
185
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
186
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
187
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
188
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs)) \
189
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
190
+ .set_backend_pattern_configs(_get_ln_configs(layer_norm_op_dtype_configs)) \
191
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
192
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
193
+
194
+ def get_native_backend_config_dict():
195
+ """
196
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.
197
+ """
198
+ return get_native_backend_config().to_dict()
199
+
200
+ def get_test_only_legacy_native_backend_config_dict():
201
+ """
202
+ Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional
203
+ fp16 ops in dictionary form.
204
+ """
205
+ return get_test_only_legacy_native_backend_config().to_dict()
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/qnnpack.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_rnn_op_configs,
12
+ _get_share_qparams_op_configs,
13
+ )
14
+ from .backend_config import BackendConfig, DTypeConfig, DTypeWithConstraints
15
+
16
+ __all__ = [
17
+ "get_qnnpack_backend_config",
18
+ ]
19
+
20
+ # ===================
21
+ # | DTYPE CONFIGS |
22
+ # ===================
23
+
24
+ qnnpack_weighted_op_quint8_dtype_config = DTypeConfig(
25
+ input_dtype=torch.quint8,
26
+ output_dtype=torch.quint8,
27
+ weight_dtype=torch.qint8,
28
+ bias_dtype=torch.float,
29
+ )
30
+
31
+ qnnpack_default_op_quint8_dtype_config = DTypeConfig(
32
+ input_dtype=torch.quint8,
33
+ output_dtype=torch.quint8,
34
+ )
35
+
36
+ qnnpack_default_op_fp16_dtype_config = DTypeConfig(
37
+ input_dtype=torch.float16,
38
+ output_dtype=torch.float16,
39
+ weight_dtype=torch.float16,
40
+ bias_dtype=torch.float16,
41
+ )
42
+
43
+ qnnpack_default_dynamic_int8_dtype_config = DTypeConfig(
44
+ input_dtype=torch.quint8,
45
+ output_dtype=torch.float,
46
+ weight_dtype=torch.qint8,
47
+ bias_dtype=torch.float,
48
+ is_dynamic=True,
49
+ )
50
+
51
+ qnnpack_default_dynamic_float16_dtype_config = DTypeConfig(
52
+ input_dtype=torch.float16,
53
+ output_dtype=torch.float,
54
+ weight_dtype=torch.float16,
55
+ bias_dtype=torch.float,
56
+ is_dynamic=True,
57
+ )
58
+
59
+ qnnpack_weight_only_quint8_dtype_config = DTypeConfig(
60
+ input_dtype=torch.float,
61
+ output_dtype=torch.float,
62
+ weight_dtype=torch.quint8,
63
+ )
64
+
65
+ qnnpack_weight_only_quint4x2_dtype_config = DTypeConfig(
66
+ input_dtype=torch.float,
67
+ output_dtype=torch.float,
68
+ weight_dtype=torch.quint4x2,
69
+ )
70
+
71
+ # xnnpack compatible dtype configs
72
+
73
+ # We restrict scale values to be 2 ** -12 to ensure the
74
+ # requantization scale never falls below the xnnpack lower
75
+ # threshold. Additionally, for qint8 weight, we restrict
76
+ # the quantization values to [-127, +127], excluding -128.
77
+ # For more detail, refer to the description of
78
+ # `default_symmetric_qnnpack_qconfig`.
79
+
80
+ # TODO: add additional restriction on qscheme to ensure it
81
+ # is either per_tensor_symmetric or per_channel_symmetric
82
+
83
+ qnnpack_act_qint8_scale_min_2_neg_12 = DTypeWithConstraints(
84
+ dtype=torch.qint8,
85
+ scale_min_lower_bound=2 ** -12,
86
+ )
87
+
88
+ qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12 = DTypeWithConstraints(
89
+ dtype=torch.qint8,
90
+ quant_min_lower_bound=-127,
91
+ quant_max_upper_bound=127,
92
+ scale_min_lower_bound=2 ** -12,
93
+ )
94
+
95
+ qnnpack_weighted_op_qint8_symmetric_dtype_config = DTypeConfig(
96
+ input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
97
+ output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
98
+ weight_dtype=qnnpack_weight_qint8_neg_127_to_127_scale_min_2_neg_12,
99
+ bias_dtype=torch.float,
100
+ )
101
+
102
+ qnnpack_default_op_qint8_symmetric_dtype_config = DTypeConfig(
103
+ input_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
104
+ output_dtype=qnnpack_act_qint8_scale_min_2_neg_12,
105
+ )
106
+
107
+
108
+ # =====================
109
+ # | BACKEND CONFIGS |
110
+ # =====================
111
+
112
+ def get_qnnpack_backend_config() -> BackendConfig:
113
+ """
114
+ Return the `BackendConfig` for PyTorch's native QNNPACK backend.
115
+ """
116
+ conv_dtype_configs = [
117
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
118
+ qnnpack_weighted_op_quint8_dtype_config,
119
+ ]
120
+ linear_dtype_configs = [
121
+ qnnpack_weighted_op_qint8_symmetric_dtype_config,
122
+ qnnpack_weighted_op_quint8_dtype_config,
123
+ qnnpack_default_dynamic_int8_dtype_config,
124
+ qnnpack_default_dynamic_float16_dtype_config,
125
+ ]
126
+ binary_op_dtype_configs = [
127
+ qnnpack_default_op_qint8_symmetric_dtype_config,
128
+ qnnpack_default_op_quint8_dtype_config,
129
+ ]
130
+ default_op_dtype_configs = [
131
+ qnnpack_default_op_qint8_symmetric_dtype_config,
132
+ qnnpack_default_op_quint8_dtype_config,
133
+ ]
134
+ fixed_qparams_op_dtype_configs = [
135
+ qnnpack_default_op_qint8_symmetric_dtype_config,
136
+ qnnpack_default_op_quint8_dtype_config,
137
+ ]
138
+ share_qparams_op_dtype_configs = [
139
+ qnnpack_default_op_qint8_symmetric_dtype_config,
140
+ qnnpack_default_op_quint8_dtype_config,
141
+ ]
142
+ rnn_op_dtype_configs = [
143
+ qnnpack_default_dynamic_int8_dtype_config,
144
+ qnnpack_default_dynamic_float16_dtype_config,
145
+ ]
146
+ embedding_op_dtype_configs = [
147
+ qnnpack_weight_only_quint8_dtype_config,
148
+ qnnpack_weight_only_quint4x2_dtype_config,
149
+ ]
150
+ return BackendConfig("qnnpack") \
151
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
152
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
153
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
154
+ .set_backend_pattern_config(_get_cat_config(default_op_dtype_configs)) \
155
+ .set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs)) \
156
+ .set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
157
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
158
+ .set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs)) \
159
+ .set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs)) \
160
+ .set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/tensorrt.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from .backend_config import (
4
+ BackendConfig,
5
+ BackendPatternConfig,
6
+ DTypeConfig,
7
+ ObservationType
8
+ )
9
+ from ._common_operator_config_utils import (
10
+ _get_binary_op_configs,
11
+ _get_linear_configs,
12
+ _get_conv_configs,
13
+ _get_share_qparams_op_configs,
14
+ _get_tensor_info_op_configs,
15
+ )
16
+
17
+ __all__ = [
18
+ "get_tensorrt_backend_config",
19
+ "get_tensorrt_backend_config_dict",
20
+ ]
21
+
22
+ def get_tensorrt_backend_config() -> BackendConfig:
23
+ """
24
+ Return the `BackendConfig` for the TensorRT backend.
25
+ NOTE: Current api will change in the future, it's just to unblock experimentation for
26
+ new backends, please don't use it right now.
27
+ TODO: add a README when it's more stable
28
+ """
29
+ # dtype configs
30
+ weighted_op_qint8_dtype_config = DTypeConfig(
31
+ input_dtype=torch.qint8,
32
+ output_dtype=torch.qint8,
33
+ weight_dtype=torch.qint8,
34
+ bias_dtype=torch.float,
35
+ )
36
+ non_weighted_op_qint8_dtype_config = DTypeConfig(
37
+ input_dtype=torch.qint8,
38
+ output_dtype=torch.qint8,
39
+ )
40
+
41
+ addmm_config = BackendPatternConfig(torch.addmm) \
42
+ .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
43
+ .add_dtype_config(weighted_op_qint8_dtype_config) \
44
+ ._set_input_type_to_index({
45
+ "bias": 0,
46
+ "input": 1,
47
+ "weight": 2,
48
+ })
49
+ cat_config = BackendPatternConfig(torch.cat) \
50
+ .set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
51
+ .add_dtype_config(non_weighted_op_qint8_dtype_config)
52
+ conv_dtype_configs = [
53
+ weighted_op_qint8_dtype_config,
54
+ ]
55
+ linear_dtype_configs = [
56
+ weighted_op_qint8_dtype_config,
57
+ ]
58
+ binary_op_dtype_configs = [
59
+ weighted_op_qint8_dtype_config,
60
+ ]
61
+ share_qparams_op_dtype_configs = [
62
+ non_weighted_op_qint8_dtype_config,
63
+ ]
64
+ tensor_info_op_dtype_configs = [
65
+ non_weighted_op_qint8_dtype_config,
66
+ ]
67
+ # there might be things not supported in fx2trt, but it will error out
68
+ # during fx2trt conversion and can support them after that
69
+ return BackendConfig("tensorrt") \
70
+ .set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
71
+ .set_backend_pattern_config(addmm_config) \
72
+ .set_backend_pattern_config(cat_config) \
73
+ .set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
74
+ .set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
75
+ .set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
76
+ .set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs))
77
+
78
+ def get_tensorrt_backend_config_dict():
79
+ """
80
+ Return the `BackendConfig` for the TensorRT backend in dictionary form.
81
+ """
82
+ return get_tensorrt_backend_config().to_dict()
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/utils.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Dict, Any, List, Callable, Union, Tuple, Type
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from .backend_config import (
8
+ BackendConfig,
9
+ BackendPatternConfig,
10
+ DTypeConfig,
11
+ )
12
+ from ..utils import Pattern
13
+ from ..fuser_method_mappings import (
14
+ _reverse2,
15
+ _reverse3,
16
+ )
17
+
18
+ __all__ = [
19
+ "get_pattern_to_dtype_configs",
20
+ "get_qat_module_classes",
21
+ "get_fused_module_classes",
22
+ "get_pattern_to_input_type_to_index",
23
+ "get_root_module_to_quantized_reference_module",
24
+ "get_fuser_method_mapping",
25
+ "get_module_to_qat_module",
26
+ "get_fusion_pattern_to_root_node_getter",
27
+ "get_fusion_pattern_to_extra_inputs_getter",
28
+ "remove_boolean_dispatch_from_name",
29
+ "pattern_to_human_readable",
30
+ "entry_to_pretty_str",
31
+ ]
32
+
33
def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]:
    """Map every pattern registered in ``backend_config`` to its supported dtype configs."""
    return {
        pattern: config.dtype_configs
        for pattern, config in backend_config._pattern_complex_format_to_config.items()
    }
39
def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
    """Return the deduplicated QAT module classes registered in ``backend_config``."""
    unique_qat_modules = {
        config.qat_module
        for config in backend_config.configs
        if config.qat_module is not None
    }
    return tuple(unique_qat_modules)
46
def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
    """Return the deduplicated fused module classes registered in ``backend_config``."""
    unique_fused_modules = {
        config.fused_module
        for config in backend_config.configs
        if config.fused_module is not None
    }
    return tuple(unique_fused_modules)
53
def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]:
    """Map every pattern in ``backend_config`` to its {input_type: argument_index} dict."""
    return {
        pattern: config._input_type_to_index
        for pattern, config in backend_config._pattern_complex_format_to_config.items()
    }
59
def get_root_module_to_quantized_reference_module(
        backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]:
    """Map each root module class to its reference quantized module class.

    Entries missing either the root module or the reference quantized module
    are skipped.
    """
    return {
        config.root_module: config.reference_quantized_module
        for config in backend_config.configs
        if config.root_module is not None and config.reference_quantized_module is not None
    }
67
def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]:
    """Map each fusable pattern in ``backend_config`` to its fuser method."""
    mapping: Dict[Pattern, Union[nn.Sequential, Callable]] = {}
    for pattern, config in backend_config._pattern_complex_format_to_config.items():
        if config.fuser_method is None:
            continue
        # Both the fuser method and the pattern are specified in forward order
        # in the BackendConfig, but the internal pattern matching code uses the
        # reversed nested tuple format, so convert the fuser method here.
        mapping[pattern] = _get_fuser_method_in_reversed_nested_tuple_format(config)
    return mapping
78
def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]:
    """Map each pattern in ``backend_config`` to its QAT module class, when one exists."""
    return {
        pattern: config.qat_module
        for pattern, config in backend_config._pattern_complex_format_to_config.items()
        if config.qat_module is not None
    }
85
def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
    """Map each fusion pattern to the function that extracts its root node.

    The most common root node getter simply walks to the innermost last
    element of the (possibly nested) pattern tuple::

        def get_root_node(node_pattern):
            while not isinstance(node_pattern[-1], Node):
                node_pattern = node_pattern[-1]
            return node_pattern[-1]

    which works for any pattern whose root node is the "last node", e.g.
    (torch.add, MatchAllNode, (torch.ReLU, torch.Conv2d)).
    """
    return {
        pattern: config._root_node_getter
        for pattern, config in backend_config._pattern_complex_format_to_config.items()
        if config._root_node_getter is not None
    }
101
def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
    """Map each fusion pattern to a function returning its extra input nodes.

    The extra inputs are returned in the order required by the root node.
    This mapping is optional: patterns without a getter simply do not copy
    over any extra inputs for their root node.

    Example::

        # For the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
        # with root node torch.nn.Conv2d, the node matched by MatchAllNode is an
        # extra argument to the fused module, so the getter unpacks the pattern
        # and returns that node:
        def extra_inputs_getter(pattern) -> List[Any]:
            add, extra_input, conv_pattern = pattern
            return [extra_input]
    """
    return {
        pattern: config._extra_inputs_getter
        for pattern, config in backend_config._pattern_complex_format_to_config.items()
        if config._extra_inputs_getter is not None
    }
121
def remove_boolean_dispatch_from_name(p) -> Any:
    """
    Some ops have a default string representation such as
    '<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
    this function replaces them with the hardcoded function names.
    """
    # Identity-compare against each known boolean-dispatched op and return its
    # hardcoded readable name on a match.
    readable_names = (
        (F.fractional_max_pool2d, "torch.nn.functional.fractional_max_pool2d"),
        (F.fractional_max_pool3d, "torch.nn.functional.fractional_max_pool3d"),
        (F.max_pool1d, "torch.nn.functional.max_pool1d"),
        (F.max_pool2d, "torch.nn.functional.max_pool2d"),
        (F.max_pool3d, "torch.nn.functional.max_pool3d"),
        (F.adaptive_max_pool1d, "torch.nn.functional.adaptive_max_pool1d"),
        (F.adaptive_max_pool2d, "torch.nn.functional.adaptive_max_pool2d"),
        (F.adaptive_max_pool3d, "torch.nn.functional.adaptive_max_pool3d"),
    )
    for op, readable_name in readable_names:
        if p is op:
            return readable_name
    # Anything else must already be readable; fail loudly on other
    # boolean-dispatched wrappers that are missing from the table above.
    assert "boolean_dispatch" not in str(p), \
        f"{p} does not have a human readable representation in " + \
        "quantization documentation"
    return p
148
def pattern_to_human_readable(p) -> Any:
    """Recursively convert a pattern into its human readable representation."""
    if isinstance(p, str):
        # method names are already human readable
        return p
    if isinstance(p, tuple):
        # nested patterns, recurse into each element
        return tuple(map(pattern_to_human_readable, p))
    # callables may need their boolean-dispatch repr replaced
    return remove_boolean_dispatch_from_name(p)
159
# TODO(future PR): move backend_config_dict to use dataclass and move this logic to
# the corresponding __str__ function
def entry_to_pretty_str(entry) -> str:
    """
    Given a backend_config_dict entry, returns a string with the human readable
    representation of it.

    Args:
        entry: a dict describing one backend pattern config, with optional keys
            such as "pattern", "dtype_configs" and
            "num_tensor_args_to_observation_type".

    Returns:
        A pretty-printed, dict-literal-style string representation of ``entry``.
    """
    s = "{\n"

    # always output the pattern first
    if "pattern" in entry:
        pattern_str = pattern_to_human_readable(entry["pattern"])

        s += f"  'pattern': {pattern_str},\n"

    # custom output for dtype_configs to make it look nice
    if "dtype_configs" in entry:
        s += "  'dtype_configs': [\n"
        for dtype_config in entry["dtype_configs"]:
            s += "    {\n"
            for k, v in dtype_config.items():
                s += f"      '{k}': {v},\n"
            s += "    },\n"
        s += "  ],\n"

    # custom output for num_tensor_args_to_observation_type to make it look nice
    if "num_tensor_args_to_observation_type" in entry:
        s += "  'num_tensor_args_to_observation_type': {\n"
        for k, v in entry["num_tensor_args_to_observation_type"].items():
            s += f"    {k}: {v},\n"
        s += "  },\n"

    # output all the other fields
    custom_handled_fields = [
        "pattern",
        "dtype_configs",
        "num_tensor_args_to_observation_type",
    ]
    for field_name in entry:
        if field_name in custom_handled_fields:
            continue
        s += f"  '{field_name}': {entry[field_name]},\n"

    s += "}"
    return s
205
+ def _get_pattern_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Pattern:
206
+ """
207
+ Return the pattern specified in the given config in the reversed nested tuple format
208
+ used internally in the quantization pattern matching code.
209
+
210
+ If the pattern is not a tuple, or the pattern is already specified in the reversed
211
+ nested tuple format, return the pattern as is. Otherwise:
212
+
213
+ For 2-tuples (a, b), return (b, a).
214
+ For 3-tuples (a, b, c), return (c, (b, a)).
215
+
216
+ For example:
217
+ * Given nn.Linear, return nn.Linear
218
+ * Given (nn.Linear, nn.ReLU), return (nn.ReLU, nn.Linear)
219
+ * Given (nn.Conv2d, nn.BatchNorm2d, nn.ReLU), return
220
+ (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
221
+
222
+ For context, the reason why this is needed is the user-facing BackendConfig
223
+ API accepts the flat 2-or-3-tuple format in forward order. While this simple
224
+ format handles the vast majority of use cases, it does not handle the more
225
+ complex ones, and so the internal pattern matching code for quantization uses
226
+ the following, more general reversed nested tuple format instead:
227
+
228
+ operator = module_type | functional | torch op | native op | MatchAllNode
229
+ Pattern = (operator, Pattern, Pattern, ...) | operator
230
+
231
+ In the future, we expect to replace the above complex format with the one used
232
+ by the subgraph rewriter in torch.fx, so we don't have to maintain our own
233
+ complex pattern matching code. Then we won't need this helper function anymore.
234
+ """
235
+ if config._pattern_complex_format is not None:
236
+ return config._pattern_complex_format
237
+ if config.pattern is None:
238
+ raise ValueError("Either 'pattern' or 'pattern_complex_format' must be specified")
239
+ if not isinstance(config.pattern, tuple):
240
+ return config.pattern
241
+
242
+ # Pattern is specified in the simple tuple format, need to convert
243
+ if len(config.pattern) == 2:
244
+ (a, b) = config.pattern
245
+ return (b, a)
246
+ elif len(config.pattern) == 3:
247
+ (a, b, c) = config.pattern
248
+ return (c, (b, a))
249
+ else:
250
+ raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
251
+
252
+ def _get_fuser_method_in_reversed_nested_tuple_format(config: BackendPatternConfig) -> Callable:
253
+ """
254
+ Return the fuser method specified in the given config in the reversed nested
255
+ tuple format used internally in the quantization pattern matching code.
256
+
257
+ If pattern is specified in the reversed nested tuple format, we assume the
258
+ fuser method is also specified in this format and simply return it as is.
259
+ Otherwise, we convert the fuser method as follows:
260
+
261
+ * Given f(is_qat, conv, relu), return f'(is_qat, relu, conv)
262
+ * Given f(is_qat, conv, bn, relu), return f'(is_qat, relu, bn_conv),
263
+ where bn_conv is a 2-tuple (bn, conv)
264
+
265
+ The first argument of a fuser method is always `is_qat` and is not affected
266
+ in the conversion. We currently only support functions with 3 or 4 arguments.
267
+ """
268
+ assert config.fuser_method is not None
269
+ if config._pattern_complex_format is not None:
270
+ return config.fuser_method
271
+ if not isinstance(config.pattern, tuple):
272
+ raise ValueError("Expected pattern to be a tuple, got: ", config.pattern)
273
+
274
+ # Pattern is specified in the simple tuple format, need to convert
275
+ if len(config.pattern) == 2:
276
+ return _reverse2(config.fuser_method)
277
+ elif len(config.pattern) == 3:
278
+ return _reverse3(config.fuser_method)
279
+ else:
280
+ raise ValueError("Expected a tuple with 2 or 3 elements, got: ", config.pattern)
parrot/lib/python3.10/site-packages/torch/ao/quantization/backend_config/x86.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from ._common_operator_config_utils import (
3
+ _get_binary_op_configs,
4
+ _get_bn_configs,
5
+ _get_cat_config,
6
+ _get_conv_configs,
7
+ _get_default_op_configs,
8
+ _get_embedding_op_configs,
9
+ _get_fixed_qparams_op_configs,
10
+ _get_linear_configs,
11
+ _get_rnn_op_configs,
12
+ _get_share_qparams_op_configs,
13
+ _get_tensor_info_op_configs,
14
+ )
15
+ from .backend_config import BackendConfig, DTypeConfig
16
+
17
+ __all__ = [
18
+ "get_x86_backend_config",
19
+ ]
20
+
21
# ===================
# |  DTYPE CONFIGS  |
# ===================

# X86 aligns with FBGEMM for now

# Static quantization of weighted ops: quint8 activations, qint8 weights,
# float bias (used below for conv/linear/binary/fixed-qparams ops).
x86_weighted_op_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
)

# Static quantization of non-weighted ops: quint8 in, quint8 out.
x86_default_op_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.quint8,
)

# All-fp16 static config (activations, weights and bias in float16).
x86_default_op_fp16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float16,
    weight_dtype=torch.float16,
    bias_dtype=torch.float16,
)

# Dynamic quantization (is_dynamic=True): quantized input/weights,
# float output.
x86_default_dynamic_int8_dtype_config = DTypeConfig(
    input_dtype=torch.quint8,
    output_dtype=torch.float,
    weight_dtype=torch.qint8,
    bias_dtype=torch.float,
    is_dynamic=True,
)

# Dynamic variant with float16 activations/weights.
x86_default_dynamic_float16_dtype_config = DTypeConfig(
    input_dtype=torch.float16,
    output_dtype=torch.float,
    weight_dtype=torch.float16,
    bias_dtype=torch.float,
    is_dynamic=True,
)

# Weight-only quantization: float activations, quint8 weights
# (used below for embedding ops).
x86_weight_only_quint8_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint8,
)

# Weight-only quantization with 4-bit packed weights.
x86_weight_only_quint4x2_dtype_config = DTypeConfig(
    input_dtype=torch.float,
    output_dtype=torch.float,
    weight_dtype=torch.quint4x2,
)
74
+
75
# =====================
# |  BACKEND CONFIGS  |
# =====================

def get_x86_backend_config() -> BackendConfig:
    """
    Return the `BackendConfig` for PyTorch's native x86 backend.
    """
    # Group the dtype configs by the kind of op they apply to.
    conv_dtype_configs = [x86_weighted_op_int8_dtype_config]
    linear_dtype_configs = [
        x86_weighted_op_int8_dtype_config,
        x86_default_dynamic_int8_dtype_config,
        x86_default_dynamic_float16_dtype_config,
    ]
    binary_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
    default_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    fixed_qparams_op_dtype_configs = [x86_weighted_op_int8_dtype_config]
    share_qparams_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    tensor_info_op_dtype_configs = [x86_default_op_quint8_dtype_config]
    rnn_op_dtype_configs = [
        x86_default_dynamic_int8_dtype_config,
        x86_default_dynamic_float16_dtype_config,
    ]
    embedding_op_dtype_configs = [
        x86_weight_only_quint8_dtype_config,
        x86_weight_only_quint4x2_dtype_config,
    ]

    # Register each op family's pattern configs on the backend config.
    backend_config = BackendConfig("x86")
    backend_config.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs))
    backend_config.set_backend_pattern_config(_get_cat_config(default_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_default_op_configs(default_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_tensor_info_op_configs(tensor_info_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_bn_configs(default_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_rnn_op_configs(rnn_op_dtype_configs))
    backend_config.set_backend_pattern_configs(_get_embedding_op_configs(embedding_op_dtype_configs))
    return backend_config
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (263 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_decomposed.cpython-310.pyc ADDED
Binary file (29.2 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (25 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/_lower_to_native_backend.cpython-310.pyc ADDED
Binary file (26.9 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/convert.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/custom_config.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse.cpython-310.pyc ADDED
Binary file (4.07 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/fuse_handler.cpython-310.pyc ADDED
Binary file (4.27 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/graph_module.cpython-310.pyc ADDED
Binary file (5.39 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_fbgemm.cpython-310.pyc ADDED
Binary file (740 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lower_to_qnnpack.cpython-310.pyc ADDED
Binary file (745 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/lstm_utils.cpython-310.pyc ADDED
Binary file (5.97 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/match_utils.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/pattern_utils.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/prepare.cpython-310.pyc ADDED
Binary file (38 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/qconfig_mapping_utils.cpython-310.pyc ADDED
Binary file (8.7 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/quantize_handler.cpython-310.pyc ADDED
Binary file (7.05 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/tracer.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (27 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/detector.cpython-310.pyc ADDED
Binary file (49.1 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_observer.cpython-310.pyc ADDED
Binary file (7.68 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/__pycache__/model_report_visualizer.cpython-310.pyc ADDED
Binary file (23.1 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/detector.py ADDED
@@ -0,0 +1,1539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any, Dict, Set, Tuple, Callable, List
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.ao.nn.qat as nnqat
7
+ from abc import ABC, abstractmethod
8
+ from torch.ao.quantization.fake_quantize import FakeQuantize
9
+ from torch.ao.quantization.fx.graph_module import GraphModule
10
+ from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver
11
+ from torch.ao.quantization.qconfig import (
12
+ QConfig,
13
+ default_qconfig,
14
+ _assert_valid_qconfig,
15
+ )
16
+ from torch.ao.quantization.observer import (
17
+ ObserverBase,
18
+ default_dynamic_quant_observer,
19
+ default_per_channel_weight_observer,
20
+ default_observer,
21
+ default_weight_observer,
22
+ )
23
+ from torch.ao.quantization.fx._equalize import (
24
+ default_equalization_qconfig,
25
+ EqualizationQConfig,
26
+ )
27
+ from torch.ao.quantization.observer import _is_activation_post_process
28
+
29
+ # Names for observer insert keys
30
+ DETECTOR_TARGET_NODE_KEY = "target_node"
31
+ DETECTOR_OBS_TO_INSERT_KEY = "observer_to_insert"
32
+ DETECTOR_IS_POST_OBS_KEY = "is_post_observer"
33
+ DETECTOR_OBS_ARGS_KEY = "observer_args"
34
+
35
# Mapping related code
class DetectorQConfigInfo:
    r"""
    Holds the QConfig-relevant findings for a single module.

    The set of flags stored here can grow as the qconfig mapping feature set
    becomes more extensible, but currently covers:
    - whether the activation observer should be dynamic
    - whether the weight observer should be per channel


    Args:
        module_fqn (str): The fully qualified name (fqn) of the module that this
        information contains info relevant to qconfig for
    """

    def __init__(self, module_fqn: str):
        super().__init__()
        self.module_fqn = module_fqn

        # Detectors overwrite these flags with their actual findings; they
        # stay False when a detector has nothing to recommend.
        self.is_activation_dynamic = False
        self.is_weight_per_channel = False

        # equalization related options
        self.is_equalization_recommended = False

    def generate_quantization_qconfig(self, module: torch.nn.Module) -> QConfig:
        r"""
        Args:
            module (torch.nn.Module) The module we are generating
            the qconfig for

        Returns the generated quantization QConfig according to what a valid configuration is
        """
        # Candidate (dynamic, per_channel) combinations, ordered from most to
        # least preferred; the first combination that validates wins.
        candidate_settings = [
            (self.is_activation_dynamic, self.is_weight_per_channel),
            (self.is_activation_dynamic, False),  # dynamic recommendation only
            (False, self.is_weight_per_channel),  # per-channel recommendation only
        ]

        chosen_qconfig = default_qconfig
        for use_dynamic, use_per_channel in candidate_settings:
            activation_observer = default_dynamic_quant_observer if use_dynamic else default_observer
            weight_observer = default_per_channel_weight_observer if use_per_channel else default_weight_observer
            candidate_qconfig = QConfig(activation_observer, weight_observer)
            try:
                _assert_valid_qconfig(candidate_qconfig, module)
                chosen_qconfig = candidate_qconfig
                break
            except AssertionError:
                # not a valid configuration for this module, try the next one
                continue

        # return the QConfig chosen (default_qconfig if none validated)
        return chosen_qconfig

    def generate_equalization_qconfig(self) -> EqualizationQConfig:
        r"""
        This returns the equalization configuration for a module.

        For now, it just returns the default, but as more equalization options become
        possible, this method can get more fleshed out with more nuanced granularity.


        Returns the generated equalization QConfig according to what a valid configuration is
        """
        # Only modules for which equalization is a valid option ever reach
        # this point, so the default config is safe to return unconditionally.
        return default_equalization_qconfig
112
+
113
+ # Adding base class for detectors
114
+ class DetectorBase(ABC):
115
+ r""" Base Detector Module
116
+ Any detector class should derive from this class.
117
+
118
+ Concrete detectors should follow the same general API, which includes:
119
+ - A method to calculate and return observer insertion points
120
+ - Should return both the fqns and the Observer class to insert
121
+ - A method to return a report based on the detector
122
+ - Should return a str-based report and dict info in Tuple[str,Dict] format
123
+ """
124
+
125
+ def __init__(self):
126
+ super().__init__()
127
+ self.detector_config_info = None
128
+
129
+ @abstractmethod
130
+ def determine_observer_insert_points(self, model) -> Dict:
131
+ r"""
132
+ Args
133
+ model (nn.Module or subclass): model to find observer insertion points
134
+
135
+ Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict.
136
+ This dict maps string keys to detector specific information
137
+ """
138
+ pass
139
+
140
+ @abstractmethod
141
+ def get_detector_name(self) -> str:
142
+ r""" Returns the name of the current detector """
143
+ pass
144
+
145
+
146
+ @abstractmethod
147
+ def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
148
+ r""" Returns the DetectorQConfigInfo for each module_fqn relevant
149
+ Args
150
+ model (nn.Module or subclass): model to find observer insertion points
151
+
152
+ Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
153
+ A DetectorQConfigInfo with the information to generate a QConfig for a specific module
154
+ """
155
+ pass
156
+
157
+ def _get_targeting_node(self, prepared_fx_model: GraphModule, target_fqn: str) -> torch.fx.node.Node:
158
+ r"""
159
+ Takes in a GraphModule and the target_fqn and finds the node whose target is this fqn.
160
+
161
+ If it's not found, it means it is most likely inside a fused layer
162
+ We just go one layer up in terms of the fqn we are searching for until we find parent node
163
+ If we get to empty string, then we know that it doesn't exist
164
+
165
+ The reason for the recursion is that if the model that we are looking for got fused,
166
+ we will have module fqn as e.g. x.linear.0 but the graph will only have a node for the fused module,
167
+ which would have fqn as x.linear so they will not match.
168
+ To handle this, if we don't match, we then take off the last bit of the fqn e.g. x.linear.0 -> x.linear,
169
+ or more generally foo.bar.baz -> foo.bar and search again, this will allow us to locate the correct module
170
+ even in cases with fusion
171
+
172
+ Args:
173
+ prepared_fx_model (GraphModule): The prepared Fx GraphModule
174
+ target_fqn (str): The fqn of the layer we are trying to target
175
+
176
+ Returns the node object we are trying to add observers around
177
+ """
178
+ for node in prepared_fx_model.graph.nodes:
179
+ # if the node's target is our target, return it
180
+ if node.target == target_fqn:
181
+ return node
182
+
183
+ # getting here means node not found
184
+ # if no "." we are already at base and failed
185
+ parent_fqn_sep_index = target_fqn.rfind(".")
186
+ if parent_fqn_sep_index == -1:
187
+ raise ValueError("passed in target_fqn not found in graph's targets.")
188
+ else:
189
+ # recursively call it with parent fqn
190
+ return self._get_targeting_node(prepared_fx_model, target_fqn[:parent_fqn_sep_index])
191
+
192
    @abstractmethod
    def generate_detector_report(self, model) -> Tuple[str, Dict[str, Any]]:
        r"""Produce this detector's report for the given model.

        Implemented by each concrete detector subclass.

        Args
            model (nn.Module or subclass): model to find observer insertion points

        Returns a Tuple of two elements:
            Str: string report of the suggested improvements
            Dict: contains useful data collected by the observer pertinent to this report
        """
        pass
203
+
204
class PerChannelDetector(DetectorBase):
    r"""This class is used to detect if any Linear or Conv layers in a model utilize per_channel quantization.
    Only Linear and Conv layers can use per_channel as of now so only these two are currently checked.

    per_channel quantization can lead to major benefits in the form of accuracy.
    Therefore, if the backend used by the user supports it, it is recommended to use it.

    Args:
        backend (str, optional): the backend the user wishes to use in production.
            An empty string (the default) means "use the engine that is active at
            construction time", i.e. torch.backends.quantized.engine.
    """

    # Keys for return dictionary
    BACKEND_KEY = "backend"
    PER_CHAN_SUPPORTED_KEY = "per_channel_quantization_supported"
    PER_CHAN_USED_KEY = "per_channel_quantization_used"

    # Default map for representing supported per channel quantization modules for different backends
    DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES: Dict[str, Set[Any]] = {
        "fbgemm": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d},
        "qnnpack": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d},
        "onednn": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d},
        "x86": {nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d},
    }

    def __init__(self, backend: str = ""):
        """Store the target backend and look up its per-channel-capable module set.

        Raises:
            ValueError: if the chosen backend has no per-channel support configuration.
        """
        super().__init__()

        # Bug fix: the previous signature default (torch.backends.quantized.engine)
        # was evaluated once at class-definition time, so later changes to the
        # active quantized engine were silently ignored. Resolve the default
        # lazily, at construction time, instead.
        self.backend_chosen = backend if backend else torch.backends.quantized.engine
        self.supported_modules: Set[Any] = set()
        if self.backend_chosen in self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES:
            self.supported_modules = self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES[self.backend_chosen]
        else:
            raise ValueError(f"Not configured to work with {self.backend_chosen}. Try a different default backend")

    def get_detector_name(self) -> str:
        r"""Returns the string name of this detector."""
        return "per_channel_detector"

    def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
        r"""Returns the DetectorQConfigInfo for each module_fqn relevant
        Args
            model (nn.Module or subclass): model to find observer insertion points

        Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
            A DetectorQConfigInfo with the information to generate a QConfig for a specific module
        """
        # run the helper function to populate the dictionary
        per_channel_info = self._detect_per_channel_helper(model)

        # translate each per-channel-support entry into a DetectorQConfigInfo
        module_fqn_to_detector_qconfig_info = {}
        for module_fqn, module_info in per_channel_info.items():
            detector_qconfig_info = DetectorQConfigInfo(module_fqn)
            detector_qconfig_info.is_weight_per_channel = module_info[self.PER_CHAN_SUPPORTED_KEY]
            module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info

        return module_fqn_to_detector_qconfig_info

    def determine_observer_insert_points(self, model: nn.Module) -> Dict:
        r"""
        There is no observers inserted for the PerChannelDetector.

        Returns an empty dictionary since no observers are added or needed
        """
        return {}

    def _detect_per_channel_helper(self, model: nn.Module) -> Dict[str, Dict[str, Any]]:
        r"""
        Determines if per_channel quantization is supported in modules and submodules.

        Each entry of the returned dict maps a module's fully-qualified-name to
        information on whether per_channel quantization is supported/used for it.

        Args:
            model: The current module that is being checked to see if it is per_channel quantizable

        Returns dictionary mapping fqns to if per_channel quantization is possible
        """
        per_channel_info: Dict[str, Dict[str, Any]] = {}

        # walk all submodules, including nested ones
        for fqn, module in model.named_modules():
            # only Linear/Conv (and their QAT variants) can use per-channel quantization
            if not any(isinstance(module, x) for x in self.supported_modules):
                continue

            # module type is in the backend's include list, so supported
            per_channel_supported = True

            # assert statement for MyPy
            q_config_file = module.qconfig
            assert isinstance(q_config_file, QConfig)

            # this object should either be fake quant or observer
            q_or_s_obj = module.qconfig.weight.p.func()
            assert isinstance(q_or_s_obj, (FakeQuantize, ObserverBase))

            per_channel_used = False  # will be true if found in qconfig

            if hasattr(q_or_s_obj, "ch_axis"):  # then we know that per_channel quantization used
                # all fake quants have channel axis so need to check is_per_channel
                if isinstance(q_or_s_obj, FakeQuantize):
                    if hasattr(q_or_s_obj, "is_per_channel") and q_or_s_obj.is_per_channel:
                        per_channel_used = True
                elif isinstance(q_or_s_obj, ObserverBase):
                    # should be an observer otherwise
                    per_channel_used = True
                else:
                    raise ValueError("Should be either observer or fake quant")

            per_channel_info[fqn] = {
                self.PER_CHAN_SUPPORTED_KEY: per_channel_supported,
                self.PER_CHAN_USED_KEY: per_channel_used,
                self.BACKEND_KEY: self.backend_chosen,
            }

        return per_channel_info

    def generate_detector_report(self, model: nn.Module) -> Tuple[str, Dict[str, Any]]:
        r"""Checks if any Linear or Conv layers in the model utilize per_channel quantization.
        Only Linear and Conv layers can use per_channel as of now so only these two are currently checked.

        Looks at q_config format and backend to determine if per_channel can be utilized.
        Uses the DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES structure to determine support

        Args:
            model: The prepared and calibrated model we want to check if using per_channel

        Returns a tuple with two elements:
            String report of potential actions to improve model (if per_channel quantization is available in backend)
            Dictionary mapping per_channel quantizable elements to:
                whether per_channel quantization is supported by the backend
                if it is being utilized in the current model
        """
        # run the helper function to populate the dictionary
        per_channel_info = self._detect_per_channel_helper(model)

        # String to let the user know of further optimizations
        further_optims_str = f"Further Optimizations for backend {self.backend_chosen}: \n"

        # flag any supported-but-unused modules in the report
        optimizations_possible = False
        for fqn, fqn_dict in per_channel_info.items():
            if fqn_dict[self.PER_CHAN_SUPPORTED_KEY] and not fqn_dict[self.PER_CHAN_USED_KEY]:
                optimizations_possible = True
                further_optims_str += f"Module {fqn} can be configured to use per_channel quantization.\n"

        if optimizations_possible:
            further_optims_str += (
                "To use per_channel quantization, make sure the qconfig has a per_channel weight observer."
            )
        else:
            further_optims_str += "No further per_channel optimizations possible."

        # return the string and the dictionary form of same information
        return (further_optims_str, per_channel_info)
374
+
375
+
376
class DynamicStaticDetector(DetectorBase):
    r"""
    Determines whether dynamic or static quantization is more appropriate for a given module.

    Takes advantage of the ModelReportObserver that records range information.
    Stationary distribution of data are strictly above tolerance level for the comparison statistic:

        S = average_batch_activation_range/epoch_activation_range

    Nonstationary distributions are below or at the tolerance level for this metric.

    If the distribution of data right after the module is non-stationary, recommend dynamic quantization
    Otherwise recommend static quantization

    Args:
        tolerance (float, optional): The threshold where S metric is stationary above and non-stationary otherwise. Default: 0.5
    """
    # names for the pre and post observers that are inserted
    DEFAULT_PRE_OBSERVER_NAME = "model_report_pre_observer"
    DEFAULT_POST_OBSERVER_NAME = "model_report_post_observer"

    # naming conventions for stationary vs non-stationary data
    STATIONARY_STR = "stationary"
    NON_STATIONARY_STR = "non-stationary"

    # naming for activation
    INPUT_ACTIVATION_PREFIX = "input_activation_"
    OUTPUT_ACTIVATION_PREFIX = "output_activation_"

    # naming conventions for the keys of the return module info
    TOLERANCE_KEY = "dynamic_static_tolerance"
    DEFAULT_DYNAMIC_REC_KEY = "dynamic_recommended"
    PRE_OBS_COMP_STAT_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
    POST_OBS_COMP_STAT_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
    PRE_OBS_DATA_DIST_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
    POST_OBS_DATA_DIST_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
    IS_CURRENTLY_SUPPORTED_KEY = "is_dynamic_supported"

    # modules that are supported both dynamic and static for this report function
    DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED = {nn.Linear}

    # modules that will be supported soon for both
    DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED = {nn.Conv1d, nn.Conv2d, nn.Conv3d}

    def __init__(self, tolerance: float = 0.5):
        super().__init__()

        # set tolerance level and initialize a set to keep track of useful fqn locations
        self.tolerance = tolerance
        self.useful_observer_fqns: Set[str] = set()

    def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
        r"""
        Determines where observers need to be inserted for the Dynamic vs Static detector.
        For this detector, we want to place observers on either side of linear layers in the model.

        Currently inserts observers for:
            linear layers

        Args:
            prepared_fx_model (GraphModule): The prepared Fx GraphModule

        Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
            key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
            key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
            key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
            key "observer_args" -> The arguments that are meant to be passed into the observer
        """

        # observer for this detector is ModelReportObserver
        obs_ctr = ModelReportObserver

        # return dict
        obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}

        for fqn, module in prepared_fx_model.named_modules():
            # make sure module is supported
            if self._is_supported(module, insert=True):
                # if it's a supported type, we want to get node and add observer insert locations
                targeted_node = self._get_targeting_node(prepared_fx_model, fqn)

                # add entry for pre-observer (observes the module's inputs)
                pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME

                obs_fqn_to_info[pre_obs_fqn] = {
                    DETECTOR_TARGET_NODE_KEY: targeted_node,
                    DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
                    DETECTOR_IS_POST_OBS_KEY: False,
                    DETECTOR_OBS_ARGS_KEY: targeted_node.args
                }

                # add entry for post-observer (observes the module's output)
                post_obs_fqn = fqn + "." + self.DEFAULT_POST_OBSERVER_NAME

                obs_fqn_to_info[post_obs_fqn] = {
                    DETECTOR_TARGET_NODE_KEY: targeted_node,
                    DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
                    DETECTOR_IS_POST_OBS_KEY: True,
                    DETECTOR_OBS_ARGS_KEY: (targeted_node,)
                }

        return obs_fqn_to_info

    def get_detector_name(self) -> str:
        r""" returns the string name of this detector"""
        return "dynamic_vs_static_detector"

    def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
        r""" Returns the DetectorQConfigInfo for each module_fqn relevant
        Args
            model (nn.Module or subclass): model to find observer insertion points

        Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
            A DetectorQConfigInfo with the information to generate a QConfig for a specific module
        """
        # run the helper function to populate the dictionary
        dynamic_static_info = self._generate_dict_info(model)

        # we actually have a qconfig info object we are populating
        module_fqn_to_detector_qconfig_info = {}

        for module_fqn in dynamic_static_info:
            # create a detector info instance
            detector_qconfig_info = DetectorQConfigInfo(module_fqn)

            # see if dynamic quantization is recommended for this module
            dynamic_static_recommended: bool = dynamic_static_info[module_fqn][self.DEFAULT_DYNAMIC_REC_KEY]
            detector_qconfig_info.is_activation_dynamic = dynamic_static_recommended
            module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info

        return module_fqn_to_detector_qconfig_info

    def _is_supported(self, module: nn.Module, insert: bool = False) -> bool:
        r"""Returns whether the given module is supported for observers

        Args
            module: The module to check and ensure is supported
            insert: True if this is check for observer insertion, false if for report gen

        Returns True if the module is supported by observer, False otherwise
        """
        # check to see if module is of a supported type
        is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)

        # check if it will be supported
        future_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED)

        # supported
        supported = is_supported_type or future_supported_type

        # this is check for observer insertion
        if insert:
            return supported
        else:
            # this is for report gen and we also need to check if it contains observers
            has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) and hasattr(module, self.DEFAULT_POST_OBSERVER_NAME)
            return supported and has_obs

    def _generate_dict_info(self, model: GraphModule) -> Dict[str, Any]:
        r"""
        Helper function for generate_detector_report that does the generation of the dictionary.
        This process is done as specified in generate_detector_report documentation

        Args:
            model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

        Returns a Dictionary mapping modules with ModelReportObservers around them to:
            whether dynamic quantization is recommended
            their S metric of input to module
            whether input to module is stationary or non-stationary
            their S metric of output of module
            whether output of module is stationary or non-stationary
            the tolerance level to decided whether input/output is stationary or non-stationary
            whether it is currently supported or planned for the future
        """
        # store modules dynamic vs static information
        module_dynamic_static_info = {}

        # This for loop goes through the modules, and extracts all relevant information into module_dynamic_static_info
        # This information primary includes whether the data distributions around a supported module is stationary or not
        # Based on this, it is recorded whether dynamic or static quantization is recommended

        # loop through all submodules included nested ones
        for fqn, module in model.named_modules():
            # if module is Linear has the ModelReportObserver attached to it
            if self._is_supported(module):
                # get pre and post observers for the module
                pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
                post_obs = getattr(module, self.DEFAULT_POST_OBSERVER_NAME)

                # get the S = average_batch_range / epoch_range statistic for each side
                pre_stat = pre_obs.get_batch_to_epoch_ratio()
                post_stat = post_obs.get_batch_to_epoch_ratio()

                # record module, pre and post stat, and whether to do dynamic or static based off it
                # true if post observer data distribution is non-stationary, false if it's stationary
                dynamic_recommended = post_stat <= self.tolerance

                # specify the classifications for whether data distributions considered stationary or non-stationary
                pre_obs_dist_classif = self.STATIONARY_STR if pre_stat > self.tolerance else self.NON_STATIONARY_STR
                post_obs_dist_classif = self.STATIONARY_STR if post_stat > self.tolerance else self.NON_STATIONARY_STR

                # check if current support or future support
                is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)

                # store the set of important information for this module
                module_info = {
                    self.TOLERANCE_KEY: self.tolerance,
                    self.DEFAULT_DYNAMIC_REC_KEY: dynamic_recommended,
                    self.PRE_OBS_COMP_STAT_KEY: pre_stat,
                    self.PRE_OBS_DATA_DIST_KEY: pre_obs_dist_classif,
                    self.POST_OBS_COMP_STAT_KEY: post_stat,
                    self.POST_OBS_DATA_DIST_KEY: post_obs_dist_classif,
                    self.IS_CURRENTLY_SUPPORTED_KEY: is_supported_type,
                }

                module_dynamic_static_info[fqn] = module_info

        return module_dynamic_static_info

    def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
        r"""
        Determines whether dynamic or static quantization is more appropriate for a given module.

        Takes advantage of the ModelReportObserver that records range information.
        Stationary distribution of data are strictly above tolerance level for the comparison statistic:

            S = average_batch_activation_range/epoch_activation_range

        Nonstationary distributions are below or at the tolerance level for this metric.

        If the distribution of data right after the module is non-stationary, recommend dynamic quantization
        Otherwise recommend static quantization

        This will then generate suggestions for dynamic vs static quantization focused around Linear.

        Args:
            model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

        Returns a tuple with two elements:
            String report of of whether dynamic or static quantization is recommended for certain modules
            Dictionary mapping modules with ModelReportObservers around them to:
                whether dynamic quantization is recommended
                their S metric of input to module
                whether input to module is stationary or non-stationary
                their S metric of output of module
                whether output of module is stationary or non-stationary
                the tolerance level to decided whether input/output is stationary or non-stationary
                whether it is currently supported or planned for the future
        """

        # get the dictionary of the information to format the string report
        module_dynamic_static_info = self._generate_dict_info(model)

        dynamic_vs_static_string = "Dynamic vs. Static Quantization suggestions: \n"

        modules_added: bool = False  # check to make sure at least 1 module added.

        dynamic_benefit = " You will get more accurate results if you use dynamic quantization"
        static_benefit = " You can increase model efficiency if you use static quantization"
        future_support_str = ". This layer is not yet supported for dynamic quantization"
        # This for loop goes through the information collected in module_dynamic_static_info and:
        #   Populates the string based report with the information from module_dynamic_static_info
        #   Compiles the complete report by appending relevant formatted strings

        for module_fqn in module_dynamic_static_info.keys():

            # there is at least 1 module for suggestion
            modules_added = True
            module_info = module_dynamic_static_info[module_fqn]
            suggestion_string_template = "For module {} it is suggested to use {} quantization because {}.\n"

            # decide what string formatting values will be
            quantization_type = ""
            quantization_reasoning = "the distribution of data before {} is {} and the distribution after is {}."

            benefit_str = ""

            # strings for if dynamic quantized per tensor is needed
            recommend_per_tensor = ". We recommend to add a {} before this module if it is static."
            rec_lay_to_add = "dynamic quantize per tensor layer"
            dynamic_per_tensor_string = recommend_per_tensor.format(rec_lay_to_add)
            dynamic_per_tensor_reasoning_string = (
                " This is because the input to this module has a non-stationary distribution"
            )

            # start composing explanation
            if module_info[self.DEFAULT_DYNAMIC_REC_KEY]:
                quantization_type = "dynamic"
                # check if currently supported or future supported
                benefit_str = dynamic_benefit
                if not module_info[self.IS_CURRENTLY_SUPPORTED_KEY]:
                    benefit_str += future_support_str
            else:
                quantization_type = "static"
                benefit_str = static_benefit

            # now set the quantization explanation string
            quantization_reasoning = (
                quantization_reasoning.format(
                    module_fqn, module_info[self.PRE_OBS_DATA_DIST_KEY], module_info[self.POST_OBS_DATA_DIST_KEY]
                )
                + benefit_str
            )

            # if we have a non-stationary input -> linear -> stationary we suggested static
            # however, we want to also recommend they add a dynamic quantize per tensor right if this change is made
            if (
                module_info[self.PRE_OBS_DATA_DIST_KEY] == self.NON_STATIONARY_STR
                and module_info[self.POST_OBS_DATA_DIST_KEY] == self.STATIONARY_STR
            ):
                quantization_reasoning = (
                    quantization_reasoning + dynamic_per_tensor_string + dynamic_per_tensor_reasoning_string
                )

            # format the overall suggestion string with the specific inputs
            module_suggestion_string = suggestion_string_template.format(
                module_fqn, quantization_type, quantization_reasoning
            )

            # append to overall suggestion
            dynamic_vs_static_string += module_suggestion_string

        if not modules_added:
            dynamic_vs_static_string += "No applicable layers for suggestions. Only linear and conv are valid.\n"

        # return the string as well as the dictionary of information
        return (dynamic_vs_static_string, module_dynamic_static_info)
705
+
706
+
707
+ class InputWeightEqualizationDetector(DetectorBase):
708
+ r"""
709
+ Determines whether input-weight equalization can help improve quantization for certain modules.
710
+
711
+ Specifically, this list of modules includes:
712
+ linear
713
+ conv
714
+
715
+ Determines whether input-weight equalization is recommended based on the comp stat:
716
+ s_c = sqrt(w_c/W)/sqrt(i_c/I)
717
+ where:
718
+ w_c is range of weight for channel c, W is range of weight over all channels
719
+ i_c is range of input for channel c, I is range of input over all channels
720
+
721
+ if s_c >= threshold or <= 1 / threshold, recommends input-weight equalization
722
+
723
+ Args:
724
+ ratio_threshold (float): The threshold for s_c to determine if input-weight equalization is suggested
725
+ Should be between 0 and 1 (both non-inclusive)
726
+ ch_axis (int, optional): The channel axis being observed to determine input weight equalization
727
+ Default: 1
728
+
729
+ * :attr:`ratio_threshold`: The threshold for s_c to determine if input-weight equalization is suggested
730
+ Should be between 0 and 1
731
+
732
+ * :attr:`ch_axis`: The channel axis being observed to determine input weight equalization
733
+
734
+ * :attr:`SUPPORTED_MODULES`: This specifies the modules that are supported for input-weight equalization
735
+
736
+ * :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector
737
+ """
738
+
739
+ SUPPORTED_MODULES: Set[Callable] = {nn.Linear,
740
+ nn.Conv1d,
741
+ nn.Conv2d,
742
+ nn.Conv3d,
743
+ nnqat.Linear,
744
+ nnqat.Conv1d,
745
+ nnqat.Conv2d,
746
+ nnqat.Conv3d}
747
+
748
+ # names for the pre and post observers that are inserted
749
+ DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer"
750
+
751
+ # weight / activation prefix for each of the below info
752
+ WEIGHT_PREFIX = "weight_"
753
+ ACTIVATION_PREFIX = "input_activation_"
754
+
755
+ # string names for keys of info dictionaries
756
+ PER_CHANNEL_MAX_KEY = "per_channel_max"
757
+ PER_CHANNEL_MIN_KEY = "per_channel_min"
758
+ GLOBAL_MAX_KEY = "global_max"
759
+ GLOBAL_MIN_KEY = "global_min"
760
+
761
+ # keys for return dict of recommendations
762
+ RECOMMENDED_KEY = "input_weight_equalization_recommended"
763
+ COMP_METRIC_KEY = "input_weight_channel_comparison_metrics"
764
+ THRESHOLD_KEY = "input_weight_threshold"
765
+ CHANNEL_KEY = "input_weight_channel_axis"
766
+
767
+ # default weight and info strings
768
+ WEIGHT_STR = "weight"
769
+ INPUT_STR = "input"
770
+
771
+ # default for what ratio we recommend input weight
772
+ DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO = 0.4
773
+
774
+ def __init__(self, ratio_threshold: float, ch_axis: int = 1):
775
+ # ensure passed in inputs are valid
776
+ if ratio_threshold <= 0 or ratio_threshold >= 1:
777
+ raise ValueError("Make sure threshold is > 0 and < 1")
778
+
779
+ # initialize attributes based on args
780
+ self.ratio_threshold: float = ratio_threshold
781
+ self.ch_axis: int = ch_axis
782
+
783
+ def _is_supported(self, module: nn.Module, insert: bool = False) -> bool:
784
+ r"""Returns whether the given module is supported for observers
785
+
786
+ Args
787
+ module: The module to check and ensure is supported
788
+ insert: True if this is check for observer insertion, false if for report gen
789
+
790
+ Returns True if the module is supported by observer, False otherwise
791
+ """
792
+ # check to see if module is of a supported type
793
+ is_supported_type = any(type(module) is x for x in self.SUPPORTED_MODULES)
794
+
795
+ # this is check for observer insertion
796
+ if insert:
797
+ return is_supported_type
798
+ else:
799
+ # this is for report gen and we also need to check if it contains observers
800
+ has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
801
+ return is_supported_type and has_obs
802
+
803
+ def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
804
+ r""" Returns the DetectorQConfigInfo for each module_fqn relevant
805
+ Args
806
+ model (nn.Module or subclass): model to find observer insertion points
807
+
808
+ Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
809
+ A DetectorQConfigInfo with the information to generate a QConfig for a specific module
810
+ """
811
+ # run the helper function to populate the dictionary
812
+ # find the range of inputs
813
+ input_values: Dict[str, Dict] = self._extract_input_info(model)
814
+
815
+ # find the range of weights
816
+ weight_values: Dict[str, Dict] = self._extract_weight_info(model)
817
+
818
+ # calculate per_channel comparison statistic s_c
819
+ comp_stats: Dict[str, torch.Tensor] = self._generate_comparison_values(input_values, weight_values)
820
+
821
+ # generate the return dictionary
822
+ input_weight_equalization_info: Dict[str, Dict] = self._generate_dict_info(input_values, weight_values, comp_stats)
823
+
824
+ # we actually have a qconfig info object we are populating
825
+ module_fqn_to_detector_qconfig_info = {}
826
+
827
+ for module_fqn in input_weight_equalization_info:
828
+ # create a detector info instance
829
+ detector_qconfig_info = DetectorQConfigInfo(module_fqn)
830
+
831
+ # see if per channel quantization is supported
832
+ input_weight_recommended: bool = input_weight_equalization_info[module_fqn][self.RECOMMENDED_KEY]
833
+ detector_qconfig_info.is_equalization_recommended = input_weight_recommended
834
+ module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info
835
+
836
+ return module_fqn_to_detector_qconfig_info
837
+
838
+ def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
839
+ r"""Determines where observers need to be inserted for the Input Weight Equalization Detector.
840
+ For this detector, we want to place observers in front of supported layers.
841
+
842
+ Currently inserts observers for:
843
+ linear layers
844
+ conv layers
845
+
846
+ Args:
847
+ prepared_fx_model (GraphModule): The prepared Fx GraphModule
848
+
849
+ Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
850
+ key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
851
+ key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
852
+ key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
853
+ key "observer_args" -> The arguments that are meant to be passed into the observer
854
+ """
855
+
856
+ # observer for this detector is ModelReportObserver
857
+ obs_ctr = ModelReportObserver
858
+
859
+ # return dict
860
+ obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}
861
+
862
+ for fqn, module in prepared_fx_model.named_modules():
863
+ # check to see if module is of a supported type
864
+ if self._is_supported(module, insert=True):
865
+ # if it's a supported type, we want to get node and add observer insert locations
866
+ targeted_node = self._get_targeting_node(prepared_fx_model, fqn)
867
+
868
+ # add entry for pre-observer
869
+ pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME
870
+
871
+ obs_fqn_to_info[pre_obs_fqn] = {
872
+ DETECTOR_TARGET_NODE_KEY: targeted_node,
873
+ DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis),
874
+ DETECTOR_IS_POST_OBS_KEY: False,
875
+ DETECTOR_OBS_ARGS_KEY: targeted_node.args,
876
+ }
877
+
878
+ return obs_fqn_to_info
879
+
880
+ def get_detector_name(self) -> str:
881
+ r"""Returns the name of this detector"""
882
+ return "input_weight_equalization_detector"
883
+
884
+ def _extract_input_info(self, model: GraphModule) -> Dict[str, Dict]:
885
+ r"""
886
+ Takes in a calibrated GraphModule and then finds the relevant observers.
887
+ It then extracts the input information for each observer returns it
888
+
889
+ Args
890
+ model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
891
+
892
+ Returns a dict mapping relevant module fqns (str) to a dict with keys:
893
+ "input_activation_per_channel_max" : maps to the per_channel max values
894
+ "input_activation_per_channel_min" : maps to the per_channel min values
895
+ "input_activation_global_max" : maps to the global max recorded
896
+ "input_activation_global_min" : maps to the global min recorded
897
+ """
898
+
899
+ # return dictionary mapping observer fqns to desired info
900
+ input_info: Dict[str, Dict] = {}
901
+
902
+ for fqn, module in model.named_modules():
903
+ # if module is supported and it has a pre-observer
904
+ if self._is_supported(module):
905
+ # get pre observer for the module
906
+ pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
907
+
908
+ input_info[fqn] = {
909
+ self.ACTIVATION_PREFIX + self.PER_CHANNEL_MAX_KEY: pre_obs.max_val,
910
+ self.ACTIVATION_PREFIX + self.PER_CHANNEL_MIN_KEY: pre_obs.min_val,
911
+ self.ACTIVATION_PREFIX + self.GLOBAL_MAX_KEY: max(pre_obs.max_val),
912
+ self.ACTIVATION_PREFIX + self.GLOBAL_MIN_KEY: min(pre_obs.min_val),
913
+ }
914
+
915
+ return input_info
916
+
917
+ def _extract_weight_info(self, model: GraphModule) -> Dict[str, Dict]:
918
+ r"""
919
+ Takes in a calibrated GraphModule and then finds the relevant observers.
920
+ It then extracts the weight information for each layer an observer is attached to.
921
+
922
+ Args
923
+ model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
924
+
925
+ Returns a dict mapping module fqns (str) to a dict with keys:
926
+ "per_channel_max" : maps to the per_channel max values
927
+ "per_channel_min" : maps to the per_channel min values
928
+ "global_max" : maps to the global max recorded
929
+ "global_min" : maps to the global min recorded
930
+ """
931
+ # return dictionary mapping observer fqns to desired info
932
+ weight_info: Dict[str, Dict] = {}
933
+
934
+ for fqn, module in model.named_modules():
935
+ # if module is supported and it has a pre-observer
936
+ if self._is_supported(module):
937
+ # we don't need actual observer, just the module weights
938
+ # calculate min and max vals
939
+ device = module.weight.device
940
+ min_val: torch.Tensor = torch.tensor([float('inf')], device=device)
941
+ max_val: torch.Tensor = torch.tensor([float('-inf')], device=device)
942
+ x_copy = module.weight
943
+ x_dim = x_copy.size()
944
+
945
+ new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
946
+ new_axis_list[self.ch_axis] = 0
947
+ new_axis_list[0] = self.ch_axis
948
+ y = x_copy.permute(new_axis_list)
949
+
950
+ # Need to match dtype of min/max because the updates to buffers
951
+ # are done in place and types need to match for comparisons
952
+ y = y.to(min_val.dtype)
953
+ y = torch.flatten(y, start_dim=1)
954
+ if min_val.numel() == 0 or max_val.numel() == 0:
955
+ min_val, max_val = torch.aminmax(y, dim=1)
956
+ else:
957
+ min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
958
+ min_val = torch.min(min_val_cur, min_val)
959
+ max_val = torch.max(max_val_cur, max_val)
960
+
961
+ weight_info[fqn] = {
962
+ self.WEIGHT_PREFIX + self.PER_CHANNEL_MAX_KEY: max_val,
963
+ self.WEIGHT_PREFIX + self.PER_CHANNEL_MIN_KEY: min_val,
964
+ self.WEIGHT_PREFIX + self.GLOBAL_MAX_KEY: max(max_val),
965
+ self.WEIGHT_PREFIX + self.GLOBAL_MIN_KEY: min(min_val),
966
+ }
967
+
968
+ return weight_info
969
+
970
+ def _calculate_range_ratio(self, info_dict: Dict, info_str: str, module_fqn: str) -> torch.Tensor:
971
+ r"""
972
+ Takes in an info dict and calculates the s_c matrix.
973
+
974
+ Args:
975
+ info_dict (dict): A dictionary of either input or weight range info
976
+ info_str (str): A str describing whether currently looking at weight or input info
977
+ Either "weight" or "input"
978
+ module_fqn (str): The fqn of the module we are looking at
979
+
980
+ Returns a tensor of values, where each value is the s_c stat for a different channel
981
+ """
982
+ # calculate the ratios of the info
983
+ # get the prefix str
984
+ prefix_str = self.ACTIVATION_PREFIX if info_str == self.INPUT_STR else self.WEIGHT_PREFIX
985
+
986
+ per_channel_range = info_dict[prefix_str + self.PER_CHANNEL_MAX_KEY] - info_dict[prefix_str + self.PER_CHANNEL_MIN_KEY]
987
+ global_range = info_dict[prefix_str + self.GLOBAL_MAX_KEY] - info_dict[prefix_str + self.GLOBAL_MIN_KEY]
988
+
989
+ if global_range == 0:
990
+ range_zero_explanation = "We recommend removing this channel as it doesn't provide any useful information."
991
+ raise ValueError(
992
+ f"The range of the {info_str} data for module {module_fqn} is 0, "
993
+ f"which means you have a constant value channel. {range_zero_explanation}"
994
+ )
995
+
996
+ ratio = per_channel_range / global_range
997
+
998
+ return ratio
999
+
1000
+ def _generate_comparison_values(self, input_info: Dict, weight_info: Dict) -> Dict[str, torch.Tensor]:
1001
+ r"""
1002
+ Takes in the information on the min and max values of the inputs and weights and:
1003
+ Calculates the comp stat for each channel: s_c = sqrt(w_c/W)/sqrt(i_c/I)
1004
+
1005
+ Args:
1006
+ input_info (dict): A dict mapping each observer to input range information
1007
+ weight_info (dict): A dict mapping each observer to weight range information
1008
+
1009
+ Returns a dict mapping relevant observer fqns (str) to a 1-D tensor.
1010
+ Each value is a different s_c value for a different channel
1011
+ """
1012
+ # create return dictionary for each observer
1013
+ module_fqn_to_channel: Dict[str, torch.Tensor] = {}
1014
+
1015
+ # for each module (both passed in dicts should have same keys)
1016
+ for module_fqn in input_info:
1017
+
1018
+ # raise error if not in weight info
1019
+ if module_fqn not in weight_info:
1020
+ raise KeyError(f"Unable to find weight range stats for module {module_fqn}")
1021
+
1022
+ # calculate the ratios of the weight info and input info
1023
+ weight_ratio = self._calculate_range_ratio(weight_info[module_fqn], self.WEIGHT_STR, module_fqn)
1024
+ input_ratio = self._calculate_range_ratio(input_info[module_fqn], self.INPUT_STR, module_fqn)
1025
+
1026
+ # if mismatched size, because of grouping, we want to replicate weight enough times
1027
+ weight_channels = len(weight_ratio)
1028
+ input_channels = len(input_ratio)
1029
+ if weight_channels != input_channels:
1030
+ # we try to replicate
1031
+ assert input_channels % weight_channels == 0, "input channels should be divisible by weight channels."
1032
+ # get replication factor
1033
+ rep_factor: int = input_channels // weight_channels
1034
+
1035
+ # weight ratio is (n,), input ratio is (k,), we just repeat weight ratio k // n
1036
+ weight_ratio = weight_ratio.repeat(rep_factor)
1037
+
1038
+ # calculate the s metric per channel
1039
+ s = torch.sqrt(weight_ratio) / torch.sqrt(input_ratio)
1040
+ module_fqn_to_channel[module_fqn] = s
1041
+
1042
+ # return compiled observer ratios
1043
+ return module_fqn_to_channel
1044
+
1045
+ def _generate_dict_info(self, input_info: Dict, weight_info: Dict, comp_stats: Dict) -> Dict[str, Dict]:
1046
+ r"""
1047
+ Helper function for generate_detector_report that does the generation of the dictionary.
1048
+ This process is done as specified in generate_detector_report documentation
1049
+
1050
+ Args:
1051
+ input_info (dict): A dict mapping each module to input range information
1052
+ weight_info (dict): A dict mapping each module to weight range information
1053
+ comp_stats (dict): A dict mapping each module to its corresponding comp stat
1054
+
1055
+ Returns a dictionary mapping each module with relevant ModelReportObservers around them to:
1056
+ whether input weight equalization is recommended
1057
+ their s_c metric compared to the threshold
1058
+ the threshold used to make the recommendation
1059
+ the channel used for recording data
1060
+ the input channel range info
1061
+ the weight channel range info
1062
+ """
1063
+ # store modules input weight equalization info
1064
+ input_weight_equalization_info: Dict[str, Dict] = {}
1065
+
1066
+ # for each module we add separate set of suggestions
1067
+ for module_fqn in input_info:
1068
+
1069
+ # get relevant info for this module
1070
+ mod_input_info: Dict = input_info[module_fqn]
1071
+ mod_weight_info: Dict = weight_info[module_fqn]
1072
+ mod_comp_stat: Dict = comp_stats[module_fqn]
1073
+
1074
+ # decide if each channel should have input weight equalization or not
1075
+ channel_rec_vals: list = []
1076
+
1077
+ for val in mod_comp_stat:
1078
+ float_rep: float = val.item()
1079
+
1080
+ # decide if recommending input weight equalization
1081
+ recommended: bool = float_rep >= self.ratio_threshold and float_rep <= 1 / self.ratio_threshold
1082
+ channel_rec_vals.append(recommended)
1083
+
1084
+ # build the return dict input
1085
+ # also unpack input and weight dicts into it
1086
+ input_weight_equalization_info[module_fqn] = {
1087
+ self.RECOMMENDED_KEY: channel_rec_vals,
1088
+ self.COMP_METRIC_KEY: mod_comp_stat,
1089
+ self.THRESHOLD_KEY: self.ratio_threshold,
1090
+ self.CHANNEL_KEY: self.ch_axis,
1091
+ **mod_input_info,
1092
+ **mod_weight_info,
1093
+ }
1094
+
1095
+ # return our compiled info for each module
1096
+ return input_weight_equalization_info
1097
+
1098
def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
    r"""
    Determines whether input weight equalization is appropriate for a given module.

    Takes advantage of the ModelReport Observer which records per channel information of input range
    It then uses the passed in weight info in conjunction to compute the desired ratio
    Finally, it gives suggestions based on this information for each module of interest

    Args:
        model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

    Returns a tuple with two elements:
        String report of whether input weight equalization is recommended for certain modules
        Dictionary mapping modules of interest to:
            whether input weight equalization is recommended
            their s_c metric compared to the threshold
            the threshold used to make the recommendation
            the channel used for recording data
            the input channel range info
            the weight channel range info
    """

    # find the range of inputs
    input_values: Dict[str, Dict] = self._extract_input_info(model)

    # find the range of weights
    weight_values: Dict[str, Dict] = self._extract_weight_info(model)

    # calculate per_channel comparison statistic s_c
    comp_stats: Dict[str, torch.Tensor] = self._generate_comparison_values(input_values, weight_values)

    # generate the return dictionary
    input_weight_equalization_info: Dict[str, Dict] = self._generate_dict_info(input_values, weight_values, comp_stats)

    # now we can generate report based on this information
    input_weight_string = "Input-Weight Equalization suggestions: \n"

    # some strings to be formatted depending on module we are adding
    module_suggestion_str = "For Module {} looked at with axis {}: \n"
    channel_suggestion_str = "\tWe suggest {} input weight equalization because {}\n"
    use_str = "to use"
    no_use_str = "to not use"
    input_weight_benefit_str = "{}/{} channels would benefit and we expect significant reduction in quantization error."
    input_weight_non_benefit_reasoning = "{}/{} channels benefitting from input-weight equalization being applied."
    input_weight_non_benefit_str = "we don't expect much improvement from input-weight equalization based on {}"

    # tracks whether at least one module made it into the report
    added_module: bool = False

    # compile the suggestion string
    for module_fqn in input_weight_equalization_info:
        # we added at least 1 module
        added_module = True
        # add the module level description
        input_weight_string += module_suggestion_str.format(module_fqn, self.ch_axis)

        mod_info: Dict[str, Any] = input_weight_equalization_info[module_fqn]

        # gather info on how many channels would benefit from input weight equalization
        # NOTE: this is a list of per-channel booleans (one per channel), so sum() counts them
        recommendation_per_channel: List[bool] = mod_info[self.RECOMMENDED_KEY]
        num_recs = sum(recommendation_per_channel)

        # recommend equalization only when enough of the channels benefit
        if num_recs / len(recommendation_per_channel) >= self.DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO:
            input_benefit_formatted = input_weight_benefit_str.format(num_recs, len(recommendation_per_channel))
            channel_str = channel_suggestion_str.format(use_str, input_benefit_formatted)
            input_weight_string += channel_str
        else:
            non_benefit_reason_formatted = input_weight_non_benefit_reasoning.format(num_recs, len(recommendation_per_channel))
            non_benefit_str = input_weight_non_benefit_str.format(non_benefit_reason_formatted)
            channel_str = channel_suggestion_str.format(no_use_str, non_benefit_str)
            input_weight_string += channel_str

    # if no modules looked at, amend return string
    if not added_module:
        input_weight_string += "No applicable layers for suggestions. Only linear and conv valid.\n"

    # return a tuple with the string explanation and the compiled dict info
    return (input_weight_string, input_weight_equalization_info)
1176
+
1177
+
1178
class OutlierDetector(DetectorBase):
    r"""
    Determines whether there are significant outliers in activation data around a certain layer.

    This is ideally used in conjunction with information on stationary vs. non-stationary distribution:
        If the data is stationary, and there are significant outliers, then we want to flag them
        We want to do this on a per channel basis for detecting outliers

    Determines whether activation data is flagged as outlier based on if data is stationary and:
        p_r = avg(100th percentile / "reference_percentile"th percentile)
        where:
            p_r is average percentile ratio across all batches in the epoch
            reference_percentile is a fractional percentile between 0 and 1 exclusive

    if p_r is above some threshold, then we consider the activations to have significant outliers

    Args:
        ratio_threshold (float, optional): The threshold for p_r to determine if there are outliers in activations
            Should be >= 1
            Default: 3.5
        reference_percentile (float, optional): The denominator to find the relative scale of the 100th percentile
            Should be between 0 and 1
            Default: 0.975
        fraction_batches_used_threshold (float, optional): Threshold of fraction of batches per channel to determine outlier
            If fraction is below this, we deem number of samples used to calculate outliers as insignificant and alert user
            regardless of whether we detected outliers or not in channel to take a closer look at channel results
            Should be between 0 and 1
            Default: 0.95
        ch_axis (int, optional): The channel axis being observed to determine outliers
            Default: 1

    * :attr:`ratio_threshold`: The threshold for p_r to determine if there are outliers in activations
        The p_r value (average ratio of 100th percentile/reference_percentile) is compared to ratio_threshold
        If it is significantly greater, then we consider it an outlier
        This threshold was calculated based on the ratio of the percentiles in a normal distribution
        The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing

    * :attr:`reference_percentile`: The denominator of the top fraction to find the relative scale of the 100th percentile
        Should be between 0 and 1
        The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing

    * :attr:`fraction_batches_used_threshold`: The fraction of batches to determine outliers for each channel should be above this
        Some batches may not be used because of 0-based errors, so this is to ensure a good amount of the total batches are used
        Should be between 0 and 1

    * :attr:`ch_axis`: The channel axis being observed to determine outliers

    * :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector
    """

    # names for the pre observers that are inserted
    DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer"

    # prefix shared by input-activation statistic keys in returned info dicts
    INPUT_ACTIVATION_PREFIX = "input_activation_"

    # names for dict keys used in the report dictionaries
    OUTLIER_KEY = "outliers_detected"
    NUM_BATCHES_KEY = "outlier_detection_batches_used"
    IS_SUFFICIENT_BATCHES_KEY = "outlier_detection_is_sufficient_batches"
    COMP_METRIC_KEY = "outlier_detection_percentile_ratios"
    RATIO_THRES_KEY = "outlier_detection_ratio_threshold"
    REF_PERCENTILE_KEY = "outlier_detection_reference_percentile"
    CHANNEL_AXIS_KEY = "outlier_detection_channel_axis"
    MAX_VALS_KEY = INPUT_ACTIVATION_PREFIX + "per_channel_max"
    CONSTANT_COUNTS_KEY = "constant_batch_counts"
1244
+
1245
def __init__(
    self,
    ratio_threshold: float = 3.5,
    reference_percentile: float = 0.975,
    fraction_batches_used_threshold: float = 0.95,
    ch_axis: int = 1,
):
    r"""
    Stores the detector thresholds; see the class docstring for the meaning of each.

    Raises:
        AssertionError: if reference_percentile or fraction_batches_used_threshold
            is outside [0, 1]
    """
    # initialize the variables of interest
    self.ratio_threshold = ratio_threshold

    # make sure passed in percentile is valid
    # NOTE(review): asserts are stripped under `python -O`, so this validation
    # silently disappears there — consider raising ValueError instead
    assert reference_percentile >= 0 and reference_percentile <= 1
    assert fraction_batches_used_threshold >= 0 and fraction_batches_used_threshold <= 1
    self.reference_percentile = reference_percentile
    self.fraction_batches_used_threshold = fraction_batches_used_threshold
    self.ch_axis = ch_axis
1261
+
1262
def get_detector_name(self) -> str:
    r"""Returns the name of this detector: "outlier_detector" """
    return "outlier_detector"
1265
+
1266
def _supports_insertion(self, module: nn.Module) -> bool:
    r"""Returns whether the given module is supported for observer insertion

    Any module that doesn't have children and isn't an observer itself is supported

    Args
        module: The module to check and ensure is supported

    Returns True if the module is supported by observer, False otherwise
    """
    # only leaf modules (no children) qualify for insertion
    if len(list(module.children())) != 0:
        return False
    # never instrument an existing observer module
    return not _is_activation_post_process(module)
1280
+
1281
def get_qconfig_info(self, model) -> Dict[str, DetectorQConfigInfo]:
    r""" Returns the DetectorQConfigInfo for each module_fqn relevant
    Args
        model (nn.Module or subclass): model to find observer insertion points

    Returns a Dict mapping from unique observer fqns (where we want to insert them) to:
        A DetectorQConfigInfo with the information to generate a QConfig for a specific module
    """
    # currently doesn't do anything for outlier detector: its findings inform
    # the user but do not translate into a specific QConfig choice
    return {}
1291
+
1292
def _supports_report_gen(self, module: nn.Module) -> bool:
    r"""Returns whether the given module is supported for report generation

    Any module that has a model report pre-observer is supported

    Args
        module: The module to check and ensure is supported

    Returns True if the module is supported by observer, False otherwise
    """
    # the attribute is only present on modules that received a pre-observer
    return hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
1303
+
1304
def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
    r""" Determines where observers need to be inserted for the Outlier Detector.

    For this detector, we want to place observers in front of supported layers.

    Currently inserts observers for:
        all layers that do not have children (leaf level layers)

    Args:
        prepared_fx_model (GraphModule): The prepared Fx GraphModule

    Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
        key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
        key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
        key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
        key "observer_args" -> The arguments that are meant to be passed into the observer
    """
    obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}

    for fqn, module in prepared_fx_model.named_modules():
        # only leaf, non-observer modules get instrumented
        if not self._supports_insertion(module):
            continue

        # locate the node whose inputs this pre-observer will watch
        targeted_node = self._get_targeting_node(prepared_fx_model, fqn)

        # the observer for this detector is a ModelReportObserver configured
        # with the percentile used for the outlier ratio
        observer = ModelReportObserver(ch_axis=self.ch_axis, comp_percentile=self.reference_percentile)

        obs_fqn_to_info[fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME] = {
            DETECTOR_TARGET_NODE_KEY: targeted_node,
            DETECTOR_OBS_TO_INSERT_KEY: observer,
            DETECTOR_IS_POST_OBS_KEY: False,
            DETECTOR_OBS_ARGS_KEY: targeted_node.args,
        }

    return obs_fqn_to_info
1344
+
1345
+ def _calculate_outlier_info(
1346
+ self,
1347
+ percentile_ratios: torch.Tensor,
1348
+ counted_batches: torch.Tensor,
1349
+ total_batches: int,
1350
+ ) -> Dict[str, List[bool]]:
1351
+ r"""
1352
+ Gives info on whether the percentile ratios calculated would be considered outliers
1353
+ Also gives information on whether the collected data is statistically significant to make this claim
1354
+
1355
+ Args:
1356
+ percentile_ratios (torch.Tensor): The average percentile_ratios per channel calculated by the observer
1357
+ counted_batches (torch.Tensor): The number of batches used for average calculation per tensor
1358
+ total_batches (int): The total number of batches that passed through observer in this epoch
1359
+
1360
+ Returns a dictionary mapping:
1361
+ "outliers_detected" : list of bools per channel that are true if it is considered an outlier
1362
+ "is_sufficient_batches": if o_r was >= fraction_batches_used_threshold:
1363
+ where o_r = counted_batches / total_batches
1364
+ """
1365
+ outlier_dict: Dict[str, List[bool]] = {self.OUTLIER_KEY: [], self.IS_SUFFICIENT_BATCHES_KEY: []}
1366
+
1367
+ # get both as flattened lists for easy mapping
1368
+ ratios_list: List = percentile_ratios.tolist()
1369
+ num_batches_list: List = counted_batches.tolist()
1370
+
1371
+ # calculate whether channels were statistically significant
1372
+ significant_size = [
1373
+ batch_size / total_batches >= self.fraction_batches_used_threshold for batch_size in num_batches_list
1374
+ ]
1375
+ outlier_dict[self.IS_SUFFICIENT_BATCHES_KEY] = significant_size
1376
+
1377
+ # calculate for each channel whether it's an outlier or not based on ratio
1378
+ outlier_detected = [ratio > self.ratio_threshold for ratio in ratios_list]
1379
+ outlier_dict[self.OUTLIER_KEY] = outlier_detected
1380
+
1381
+ # return the dictionary with the two lists
1382
+ return outlier_dict
1383
+
1384
+ def _generate_info_dict(self, model: GraphModule) -> Dict[str, Dict]:
1385
+ r"""
1386
+ Helper function for generate_detector_report that does the generation of the dictionary.
1387
+ This process is done as specified in generate_detector_report documentation
1388
+
1389
+ Args:
1390
+ model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
1391
+
1392
+ Returns a dict mapping relevant module fqns to:
1393
+ whether there were outliers found in activation before
1394
+ the number of batches used for each channel
1395
+ whether fraction of applicable batches used is above fraction_batches_used_threshold
1396
+ their p_r metric compared to the threshold
1397
+ the threshold used to make the recommendation
1398
+ the reference_percentile used to make the recommendation
1399
+ the channel axis used to determine individual channels
1400
+ the constant batch counts per channel
1401
+ the per channel max values
1402
+ """
1403
+ # return dictionary mapping observer fqns to desired info
1404
+ info_dict: Dict[str, Dict] = {}
1405
+
1406
+ for fqn, module in model.named_modules():
1407
+ # if module is supported and it has a pre-observer
1408
+ if self._supports_report_gen(module):
1409
+ # get pre observer for the module
1410
+ pre_obs: ModelReportObserver = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
1411
+
1412
+ # get the number of batches and calculated ratio thresholds
1413
+ num_batches: torch.Tensor = pre_obs.percentile_batches_tracked
1414
+ average_ratios: torch.Tensor = pre_obs.average_percentile_ratio
1415
+ channel_batch_cnts: torch.Tensor = pre_obs.constant_channels
1416
+ total_batches: int = pre_obs.num_batches_tracked
1417
+
1418
+ # also get the max values
1419
+ max_vals: torch.Tensor = pre_obs.max_val
1420
+
1421
+ # we have to specifically modify how we are recording negative ratio for pre-relu layers
1422
+ for index, ratio_val in enumerate(average_ratios):
1423
+ # check if we have a negative ratio
1424
+ # a ratio might be negative if we have a situation where the 100th percentile is
1425
+ # > 0 while the nth percentile is < 0, in which case this would not be detected
1426
+ # as an outlier. Since we care more about magnitude, we make it positive.
1427
+ if ratio_val.item() < 0:
1428
+ # first make it positive
1429
+ average_ratios[index] = -ratio_val
1430
+
1431
+ if ratio_val.item() < 1:
1432
+ # if it's less than 1 we have the flip it as well
1433
+ average_ratios[index] = 1 / ratio_val
1434
+
1435
+ outlier_calcs = self._calculate_outlier_info(average_ratios, num_batches, total_batches)
1436
+
1437
+ # calculate whether ratios were outliers
1438
+ info_dict[fqn] = {
1439
+ self.CHANNEL_AXIS_KEY: self.ch_axis,
1440
+ self.REF_PERCENTILE_KEY: self.reference_percentile,
1441
+ self.RATIO_THRES_KEY: self.ratio_threshold,
1442
+ self.COMP_METRIC_KEY: average_ratios,
1443
+ self.NUM_BATCHES_KEY: num_batches,
1444
+ self.OUTLIER_KEY: outlier_calcs[self.OUTLIER_KEY],
1445
+ self.IS_SUFFICIENT_BATCHES_KEY: outlier_calcs[self.IS_SUFFICIENT_BATCHES_KEY],
1446
+ self.CONSTANT_COUNTS_KEY: channel_batch_cnts,
1447
+ self.MAX_VALS_KEY: max_vals
1448
+ }
1449
+
1450
+ return info_dict
1451
+
1452
def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
    r"""
    Determines whether there are significant outliers in the activation data around each module of interest.
    (The original summary line mentioned input weight equalization — a copy-paste slip; this detector reports outliers.)

    Takes advantage of the ModelReport Observer which records the relevant percentile information

    Args:
        model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers

    Returns a tuple with two elements:
        String report of whether there are outliers in the activations around certain modules
        Dictionary mapping modules of interest to:
            whether there were outliers found in activation before
            the number of batches used for each channel
            whether fraction of applicable batches used is above fraction_batches_used_threshold
            their p_r metric compared to the threshold
            the threshold used to make the recommendation
            the reference_percentile used to make the recommendation
            the channel axis used to determine individual channels
            the constant batch counts per channel
            the per channel max values
    """
    # generate the information dictionary of outlier information
    info_dict = self._generate_info_dict(model)

    # now we can generate report based on this information
    outlier_string = "Outlier detection report: \n"

    # tracks whether anything at all was added to the report
    added_module: bool = False

    # some strings to be formatted depending on module we are adding
    module_suggestion_str = "For Module {} looked at with axis {}: \n"
    channel_suggestion_str = "\tFor channel {}, we found outliers in the preceding activation data with {}.\n"
    channel_max_value_str = "a max value across all batches of {}"
    note_string = "Note: outlier detection is only reliable for {}. We recommend {} to ensure the most accurate results."
    note_distribution = "stationary distributions"
    note_rec = "running the static vs. dynamic detector to ensure activation data before modules above is stationary"

    # suggestion for constant batch check since that can make it no outliers
    constant_str = "\tFor channel {}, we found {} constant value batches. {}\n"
    constant_suggestion = "We recommend taking a look at the dict and data to see how frequent this occurred and why."

    # compile the suggestion string
    for module_fqn in info_dict:
        # get module specific info
        mod_info: Dict[str, Any] = info_dict[module_fqn]
        # check to see if we already added high level model desc
        added_model_desc = False
        # look at each individual channel and add a suggestion
        for index, outlier_detected in enumerate(mod_info[self.OUTLIER_KEY]):
            if outlier_detected:
                # we found at least 1 outlier
                if not added_model_desc:
                    # add the module level description (once per module)
                    outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis)
                    added_model_desc = True

                # we mark that we found at least one outlier
                added_module = True
                max_value_found_str = channel_max_value_str.format(mod_info[self.MAX_VALS_KEY][index])
                channel_str = channel_suggestion_str.format(index, max_value_found_str)
                outlier_string += channel_str

            # also check if we found constant batch
            if mod_info[self.CONSTANT_COUNTS_KEY][index] != 0:
                # make sure we add a module level highlight.
                if not added_model_desc:
                    # add the module level description
                    outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis)
                    added_model_desc = True

                constant_values_for_channel = mod_info[self.CONSTANT_COUNTS_KEY][index]
                formatted_str = constant_str.format(index, constant_values_for_channel, constant_suggestion)
                outlier_string += formatted_str
                # we also added at least one thing to description
                added_module = True


    # if found outlier, give suggestion, else give default response
    if added_module:
        # compose the note string
        note_composed = note_string.format(note_distribution, note_rec)
        outlier_string += note_composed
    else:
        outlier_string += "There were no outliers found in the activations.\n"

    return (outlier_string, info_dict)
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report.py ADDED
@@ -0,0 +1,607 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any, Dict, Set, Tuple, Callable
3
+ from collections import OrderedDict
4
+ import torch
5
+ from torch.ao.quantization.fx._model_report.detector import (
6
+ DetectorBase,
7
+ DETECTOR_OBS_ARGS_KEY,
8
+ DETECTOR_OBS_TO_INSERT_KEY,
9
+ DETECTOR_IS_POST_OBS_KEY,
10
+ DETECTOR_TARGET_NODE_KEY,
11
+ DetectorQConfigInfo
12
+ )
13
+ from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer
14
+ from torch.ao.quantization.fx.graph_module import GraphModule
15
+ from torch.ao.quantization.observer import ObserverBase
16
+ from torch.ao.quantization.qconfig_mapping import QConfigMapping, QConfig
17
+ from torch.ao.quantization.fx._equalize import EqualizationQConfig
18
+
19
class ModelReport:
    r"""
    The ModelReport class aims to provide users an easy way to diagnose issues that they run into
    with their models. The class works with all traceable GraphModules to help diagnose issues,
    though the requirements on the type of model more-so depends on the specific report the user
    is trying to generate. With respect to the reports, the ModelReport class is initialized with
    a set of Detector classes, each of which generate reports on quantization configuration
    issues a user might have.

    Currently supports generating reports on:
    - Suggestions for per-channel vs. per-tensor quantization (nn.Module)
    - Suggestions for dynamic vs static quantization for linear layers (Graph Modules)
    - Suggestions for input-weight equalization for linear and conv layers (Graph Modules)
    - Suggestions for outlier detection for all layers (Graph Modules)

    The ModelReport class has the primary functionality of inserting observers (primarily the ModelReportObserver)
    where needed for each detector to gather the information it needs, and then after calibration, the ModelReport
    class compiles the report generated by each Detector class into a single report to return to the user. It also
    has the capability to remove all the observers it inserted as well.

    * :attr:`_model` The model we wish to generate the report for. Must be a traceable GraphModule

    * :attr:`_desired_report_detectors` The set of Detectors representing desired reports from the ModelReport class
        Make sure that these are all unique types of detectors [do not have more than 1 of the same class]

    * :attr:`_desired_detector_names` The set of detector names of the _desired_report_detectors.
        This set is generated by calling the get_detector_name() of each detector

    * :attr:`_detector_name_to_observer_fqns` The mapping from each detector to fqns of observers of interest
        The purpose of this is to keep track of what observers were inserted for each detector, so that they
        can be removed at the end if desired

    * :attr:`_prepared_flag` A boolean flag that keeps track of whether we have prepared the model or not
        This is to ensure we only insert observers once with the ModelReport instance

    * :attr:`_removed_observers` A boolean to track if we have removed observers already
        The purpose is to ensure we don't attempt to remove observers twice with the same ModelReport
        instance. This also allows the functionality where we can generate the report multiple times
        as long as we haven't removed the observers yet.

    Note:
        This class was initially designed to work with the Fx Graph Mode workflow in mind. However,
        full functionality is available as long as there is a traceable GraphModule that is being used.
        One method to get a traceable GraphModule without going through the Fx workflow is to use
        the QuantizationTracer class.

    General Flow for Fx workflow:
    1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects and model
    2.) Prepare your model with prepare_fx
    3.) Call model_report.prepare_detailed_calibration to add relevant observers
    4.) Calibrate your model with data
    5.) Call model_report.generate_report on your model to generate report and optionally remove added observers
    Optional
    6.) Call model_report.generate_visualizer to get a ModelReportVisualizer instance
    7.) To help in parsing report information and debugging, view report info as a:
        - Table
        - Histogram
        - Line plot
    8.) Call model_report.generate_qconfigs to generate the qconfigs based on the report suggestions

    Example (with QuantizationTracer):
        >>> # xdoctest: +SKIP
        >>> # get the necessary qconfig
        >>> config = PrepareCustomConfig()
        >>> skipped_module_names, skipped_module_classes = get_skipped_module_name_and_classes(config, False)

        >>> # initialize our model and get GraphModule
        >>> model = SomeModel()
        >>> tracer = QuantizationTracer(skipped_module_names, skipped_module_classes)
        >>> graph_module = GraphModule(model, tracer.trace(model))

        >>> # get our set of detectors and ModelReport instance
        >>> detector_set = set([DynamicStaticDetector(tolerance=0.5), InputWeightEqualizationDetector(ratio_threshold=0.7)])
        >>> tracer_reporter = ModelReport(graph_module, tracer_detector_set)

        >>> # now we insert the observers and calibrate the model
        >>> tracer_model_with_observers = tracer_reporter.prepare_detailed_calibration()
        >>> for i in range(num_callibration_batches):
        >>>     example_input = get_callibration_input()
        >>>     tracer_model_with_observers(example_input)

        >>> # finally we generate the reports and optionally remove the observers we inserted
        >>> reports = tracer_reporter.generate_model_report(remove_inserted_observers=True)

        >>> # Optional: we can generate the qconfig mapping based on the suggestions
        >>> qconfigs = model_report.generate_qconfig_mapping()

        >>> # Optional: we can generate the equalization mapping based on the suggestions
        >>> qconfigs = model_report.generate_equalization_mapping()

        >>> # Optional: we get a ModelReportVisualizer instance to do any visualizations desired
        >>> model_report_visualizer = tracer_reporter.generate_visualizer()

    """

    def __init__(self, model: GraphModule, desired_report_detectors: Set[DetectorBase]):

        if len(desired_report_detectors) == 0:
            raise ValueError("Should include at least 1 desired report")

        # keep track of the model we wish to generate report for
        self._model: GraphModule = model

        # keep the reports private so they can't be modified
        self._desired_report_detectors = desired_report_detectors
        self._desired_detector_names = {detector.get_detector_name() for detector in desired_report_detectors}

        # keep a mapping of desired reports to observers of interest
        # this is to get the readings, and to remove them, can create a large set
        # this set can then be used to traverse the graph and remove added observers
        self._detector_name_to_observer_fqns: Dict[str, Set[str]] = {}

        # initialize each report to have empty set of observers of interest
        for desired_report in self._desired_detector_names:
            self._detector_name_to_observer_fqns[desired_report] = set()

        # flags to ensure that we can only prepare and remove observers once
        self._prepared_flag = False
        self._removed_observers = False

        # store the reports that we generated for visualization purposes
        # initially empty since no reports generated
        self._generated_reports: Dict[str, Dict] = {}

    def get_desired_reports_names(self) -> Set[str]:
        """ Returns a copy of the desired reports for viewing """
        return self._desired_detector_names.copy()

    def get_observers_of_interest(self) -> Dict[str, Set[str]]:
        """ Returns a copy of the observers of interest for viewing """
        return self._detector_name_to_observer_fqns.copy()

    def prepare_detailed_calibration(self) -> GraphModule:
        r"""
        Takes in a graph model and inserts the following observers:
        - ModelReportObserver

        Each observer is inserted based on the desired_reports into the relevant locations

        Right now, each report in self._desired_detector_names has independent insertions
        However, if a module already has a Observer of the same type, the insertion will not occur
        This is because all of the same type of Observer collect same information, so redundant

        Returns the same GraphModule with the observers inserted
        """

        # if already prepared once, cannot prepare again
        if self._prepared_flag:
            raise ValueError("Already ran preparing detailed callibration. Run the report generation next after callibration.")

        # loop through each detector, find where placements should be, and keep track
        insert_observers_fqns: Dict[str, Any] = {}

        for detector in self._desired_report_detectors:
            # determine observer points for each detector
            obs_fqn_to_info = detector.determine_observer_insert_points(self._model)
            # map each insert point to the observer to use
            insert_observers_fqns.update(obs_fqn_to_info)
            # update the set of observers this report cares about
            self._detector_name_to_observer_fqns[detector.get_detector_name()] = set(obs_fqn_to_info.keys())

        # now insert all the observers at their desired locations
        for observer_fqn, obs_info in insert_observers_fqns.items():
            self._insert_observer_around_module(
                observer_fqn,
                obs_info[DETECTOR_TARGET_NODE_KEY],
                obs_info[DETECTOR_OBS_TO_INSERT_KEY],
                obs_info[DETECTOR_OBS_ARGS_KEY],
                obs_info[DETECTOR_IS_POST_OBS_KEY],
            )

        self._prepared_flag = True

        return self._model

    def _insert_observer_around_module(
        self,
        obs_fqn: str,
        target_node: torch.fx.node.Node,
        obs_to_insert: ObserverBase,
        observer_args: Tuple,
        insert_post: bool
    ):
        r"""
        Helper function that inserts the observer into both the graph structure and the module of the model

        Args
            obs_fqn (str): The fully qualified name of the observer we want to insert
            target_node (torch.fx.node.Node): The node in model we are inserting observers around
            obs_to_insert (ObserverBase): The observer we are inserting around target_node
            observer_args (Tuple): The arguments we want to pass into the observer
            insert_post (bool): whether this is meant to be a post observer for this node
        """
        # if we are inserting post, then our target node is the next node
        if insert_post:
            target_node = target_node.next

        with self._model.graph.inserting_before(target_node):
            self._model.add_submodule(obs_fqn, obs_to_insert)
            self._model.graph.create_node(op="call_module", target=obs_fqn, args=observer_args)

        # recompile model after inserts are made
        self._model.recompile()

    def _get_node_from_fqn(self, node_fqn: str) -> torch.fx.node.Node:
        r"""
        Takes in a node fqn and returns the node based on the fqn

        Args
            node_fqn (str): The fully qualified name of the node we want to find in model

        Returns the Node object of the given node_fqn

        Raises:
            ValueError: if no node with that fqn exists in the graph
        """
        node_to_return = None
        for node in self._model.graph.nodes:
            # if the target matches the fqn, it's the node we are looking for
            if node.target == node_fqn:
                node_to_return = node
                break

        if node_to_return is None:
            raise ValueError("The node_fqn was not found within the module.")

        # assert for MyPy
        assert isinstance(node_to_return, torch.fx.node.Node)

        return node_to_return

    def generate_model_report(
        self, remove_inserted_observers: bool
    ) -> Dict[str, Tuple[str, Dict]]:
        r"""
        Generates all the requested reports.

        Note:
            You should have calibrated the model with relevant data before calling this

        The reports generated are specified by the desired_reports specified in desired_reports

        Can optionally remove all the observers inserted by the ModelReport instance

        Args:
            remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance

        Returns a mapping of each desired report name to a tuple with:
            The textual summary of that report information
            A dictionary containing relevant statistics or information for that report

        Note:
            Throws exception if we try to generate report on model we already removed observers from
            Throws exception if we try to generate report without preparing for calibration
        """
        # if we haven't prepped model for calibration, then we shouldn't generate report yet
        if not self._prepared_flag:
            raise Exception("Cannot generate report without preparing model for callibration")  # noqa: TRY002

        # if we already removed the observers, we cannot generate report
        if self._removed_observers:
            raise Exception("Cannot generate report on model you already removed observers from")  # noqa: TRY002

        # keep track of all the reports of interest and their outputs
        reports_of_interest = {}

        for detector in self._desired_report_detectors:
            # generate the individual report for the detector
            report_output = detector.generate_detector_report(self._model)
            reports_of_interest[detector.get_detector_name()] = report_output

        # if user wishes to remove inserted observers, go ahead and remove
        if remove_inserted_observers:
            self._removed_observers = True
            # get the set of all Observers inserted by this instance of ModelReport
            all_observers_of_interest: Set[str] = set()
            for observers_of_interest in self._detector_name_to_observer_fqns.values():
                all_observers_of_interest.update(observers_of_interest)

            # go through all_observers_of_interest and remove them from the graph and model
            for observer_fqn in all_observers_of_interest:
                # remove the observer from the model
                self._model.delete_submodule(observer_fqn)

                # remove the observer from the graph structure
                # _get_node_from_fqn raises ValueError if the node no longer exists,
                # so the returned node is always valid to erase
                node_obj = self._get_node_from_fqn(observer_fqn)
                self._model.graph.erase_node(node_obj)

            # remember to recompile the model
            self._model.recompile()

        # save the generated reports for visualization purposes
        saved_reports: Dict[str, Dict] = {
            report_name: report_tuple[1] for report_name, report_tuple in reports_of_interest.items()
        }

        self._generated_reports = saved_reports

        # return the reports of interest
        return reports_of_interest

    def _is_same_info_for_same_key(self, info_dict_a: Dict, info_dict_b: Dict) -> bool:
        r"""
        Takes in two dictionaries and ensures that any common keys between the two have the same
        values.

        Args:
            info_dict_a (Dict): First dictionary we wish to compare
            info_dict_b (Dict): Second dictionary we wish to compare

        Returns True if all shared keys have same values, false otherwise
        """
        # get the set of keys for both
        dict_a_keys: Set = set(info_dict_a.keys())
        dict_b_keys: Set = set(info_dict_b.keys())

        # get the intersection keys and check if same value for both dicts
        intersecting_keys: Set = dict_a_keys.intersection(dict_b_keys)

        for key in intersecting_keys:
            dict_a_val = info_dict_a[key]
            dict_b_val = info_dict_b[key]

            # if it's a tensor we have to handle separately:
            # torch.equal does a full elementwise comparison and returns False
            # (instead of raising) on shape/dtype mismatch, which also makes
            # this work for tensors of any dimensionality
            if isinstance(dict_a_val, torch.Tensor):
                if not isinstance(dict_b_val, torch.Tensor) or not torch.equal(dict_a_val, dict_b_val):
                    return False
            else:
                # for non-tensor vals
                if dict_a_val != dict_b_val:
                    return False

        # if no non matching shared keys found, return true
        return True

    def _reformat_reports_for_visualizer(self) -> OrderedDict:
        r"""
        Takes the generated reports and reformats them into the format that is desired by the
        ModelReportVisualizer

        Returns an OrderedDict mapping module_fqns to their features
        """
        # we want to reorder and reformat the information so it is ordered in terms of order
        # found in the model

        # first create new dict with all modules as keys and features under respective module
        module_fqns_to_features: Dict[str, Dict] = {}

        for module_info in self._generated_reports.values():
            # get mod -> feature dict and go through
            for module_fqn in module_info:
                # check if already in our accumulation dict
                if module_fqn in module_fqns_to_features:
                    # we merge all the features together
                    new_info: Dict = module_info[module_fqn]
                    present_info: Dict = module_fqns_to_features[module_fqn]

                    # merge them together into the new unioned dict
                    # same features keys -> same info, so okay if override

                    # do safety check to make sure shared keys have same info
                    if self._is_same_info_for_same_key(new_info, present_info):
                        module_fqns_to_features[module_fqn] = {**new_info, **present_info}
                    else:
                        error_str = "You have the same key with different values across detectors. "
                        error_str += "Someone incorrectly implemented a detector with conflicting keys to existing detectors."
                        raise ValueError(error_str)
                else:
                    # we just set it
                    module_fqns_to_features[module_fqn] = module_info[module_fqn]

        # our ordered dict so that modules can be ordered in order of how they appear in model
        features_by_module: OrderedDict[str, Dict] = OrderedDict()

        # we loop through modules in graph in order
        for fqn, module in self._model.named_modules():
            # find that fqn in fqns_to_features
            if fqn in module_fqns_to_features:
                # add it to our ordered dict
                features_by_module[fqn] = module_fqns_to_features[fqn]

        # return the ordered dict of info we created
        return features_by_module

    def generate_visualizer(self) -> ModelReportVisualizer:
        r"""
        Generates a ModelReportVisualizer instance using the reports generated
        by the generate_model_report() method.

        Returns the generated ModelReportVisualizer instance initialized

        Note:
            Throws exception if attempt to get visualizers without generating report
        """
        # check if user has generated reports at least once
        if len(self._generated_reports) == 0:
            raise Exception("Unable to generate visualizers without first generating reports")  # noqa: TRY002

        # get the ordered dict mapping modules to their full set of collected features / stats
        module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer()

        # create and return ModelReportVisualizer instance
        visualizer: ModelReportVisualizer = ModelReportVisualizer(module_fqns_to_features)

        return visualizer

    def _generate_qconfig_mapping_helper(
        self,
        detector_qconfig_info_combined: Dict[str, DetectorQConfigInfo],
        generation_function: Callable
    ) -> QConfigMapping:
        r"""
        This helper takes in the compiled detector qconfig info that
        has been compiled together and merges it into a QConfigMapping
        """
        # keep track of the qconfigmapping
        qconfig_mapping = QConfigMapping()

        # loop through each module / fqn and attempt to create QConfigMapping
        for fqn, module in self._model.named_modules():
            # if we have a qconfig info for this module
            if fqn in detector_qconfig_info_combined:
                qconfig_info_compiled = detector_qconfig_info_combined[fqn]

                # now generate the qconfig and add it to the mapping
                generated_qconfig = generation_function(qconfig_info_compiled, module)

                # add to our config
                qconfig_mapping.set_module_name(fqn, generated_qconfig)

        # return compiled mapping
        return qconfig_mapping

    # NOTE: name retains its historical misspelling ("quantizaiton") for
    # backward compatibility with existing callers
    def _update_detector_quantizaiton_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):
        r"""
        Takes in the old and new information and updates the combined information.

        Args:
            combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in
            new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info
                into it
        """
        combined_info.is_activation_dynamic = combined_info.is_activation_dynamic or new_info.is_activation_dynamic
        combined_info.is_weight_per_channel = combined_info.is_weight_per_channel or new_info.is_weight_per_channel

    def _update_detector_equalization_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):
        r"""
        Takes in the old and new information and updates the combined information.

        Args:
            combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in
            new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info
                into it
        """
        is_equalization_recommended = combined_info.is_equalization_recommended or new_info.is_equalization_recommended
        combined_info.is_equalization_recommended = is_equalization_recommended

    def _generate_module_fqn_to_detector_info_mapping(
        self,
        update_qconfig_info_function: Callable
    ) -> Dict[str, DetectorQConfigInfo]:
        r"""
        Generates a QConfigMapping based on the suggestions of the
        ModelReport API. The generated mapping encompasses all the
        different types of feedback from the different detectors
        all into one place.

        These configs are based on the suggestions provided by the ModelReport API
        and can only be generated once the reports have been generated.

        Args:
            update_qconfig_info_function (Callable) takes in a function that takes in two DetectorQConfigInfo
            and updates the one that is being compiled

        Returns a Dict mapping module_fqns to DetectorQConfigInfo objects

        Note:
            Throws exception if we try to generate mapping on model we already removed observers from
            Throws exception if we try to generate mapping without preparing for calibration
        """
        # if we haven't prepped model for calibration, then we shouldn't generate mapping yet
        if not self._prepared_flag:
            raise Exception("Cannot generate report without preparing model for callibration")  # noqa: TRY002

        # if we already removed the observers, we cannot generate the mapping
        if self._removed_observers:
            raise Exception("Cannot generate report on model you already removed observers from")  # noqa: TRY002

        # keep track of qconfig info for each module across detectors
        detector_qconfig_info_combined: Dict[str, DetectorQConfigInfo] = {}

        for detector in self._desired_report_detectors:
            # get the info from the detector
            detector_info: Dict[str, DetectorQConfigInfo] = detector.get_qconfig_info(self._model)

            # we go through the modules
            for module_fqn in detector_info:
                # see if we already have info on it
                if module_fqn in detector_qconfig_info_combined:
                    # we combine the current options with what is there
                    current_options = detector_qconfig_info_combined[module_fqn]
                    detector_options = detector_info[module_fqn]

                    update_qconfig_info_function(current_options, detector_options)
                else:
                    # we just use this for now
                    detector_qconfig_info_combined[module_fqn] = detector_info[module_fqn]

        return detector_qconfig_info_combined

    def generate_qconfig_mapping(self) -> QConfigMapping:
        r"""
        Generates a QConfigMapping based on the suggestions of the
        ModelReport API. The generated mapping encompasses all the
        different types of feedback from the different detectors
        all into one place.

        These configs are based on the suggestions provided by the ModelReport API
        and can only be generated once the reports have been generated.

        Returns a QConfigMapping for the quantization configuration

        Note:
            Throws exception if we try to generate mapping on model we already removed observers from
            Throws exception if we try to generate mapping without preparing for calibration
        """
        # get the mapping info
        detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(
            self._update_detector_quantizaiton_qconfig_info
        )

        # we will do a bit of processing and remove fqns that don't have input weight recommended

        # now we generate the QConfig for each of the options
        mapping: QConfigMapping = self._generate_qconfig_mapping_helper(
            detector_qconfig_info_combined,
            self._quantization_config_generator
        )

        # return the generated mapping
        return mapping

    def _quantization_config_generator(self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module) -> QConfig:
        r"""
        Returns the quantization configuration generated by the DetectorQConfigInfo object
        """
        return detector_qconfig_info.generate_quantization_qconfig(module)

    def _equalization_config_generator(
        self,
        detector_qconfig_info: DetectorQConfigInfo,
        module: torch.nn.Module
    ) -> EqualizationQConfig:
        r"""
        We ignore the module argument here, and only focus on the detector_qconfig_info

        Returns the equalization configuration generated by the DetectorQConfigInfo object
        """
        return detector_qconfig_info.generate_equalization_qconfig()

    def generate_equalization_mapping(self) -> QConfigMapping:
        r"""
        Generates a QConfigMapping based on the suggestions of the
        ModelReport API for equalization. The generated mapping encompasses all the
        different types of feedback from the input-weight equalization detector.

        These configs are based on the suggestions provided by the ModelReport API
        and can only be generated once the reports have been generated.

        Returns a QConfigMapping for the equalization configuration
        """
        # get the mapping info
        detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(
            self._update_detector_equalization_qconfig_info
        )

        # now we generate the QConfig for each of the options
        mapping: QConfigMapping = self._generate_qconfig_mapping_helper(
            detector_qconfig_info_combined,
            self._equalization_config_generator
        )

        # return the generated mapping
        return mapping
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report_observer.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.ao.quantization.observer import ObserverBase
4
+
5
+
6
+ class ModelReportObserver(ObserverBase):
7
+ r"""This observer is used to record additional information regarding keeping track
8
+ of S = average_batch_activation_range/epoch_activation_range.
9
+
10
+ The purpose of this information is to prepare a report to present to users on whether
11
+ Dynamic or Static Quantization is more appropriate for their model given the general
12
+ distributions of their data.
13
+
14
+ Args:
15
+ ch_axis (int, optional): The channel axis for which the range and outlier stats are computed
16
+ Default: 1
17
+ comp_percentile (float, optional): The percentile to compare against 100 percentile to find outliers
18
+ Should be between 0 and 1 exclusive
19
+ Default: 0.9
20
+
21
+ * :attr:`num_batches_tracked` specifies number of batches passed through the observer
22
+
23
+ * :attr:`average_batch_activation_range` defines average across the ranges of each batch passed through
24
+
25
+ * :attr:`epoch_activation_min` defines the minimum value passed through the observer
26
+
27
+ * :attr:`epoch_activation_max` defines the maximum value passed through the observer
28
+
29
+ * :attr:`ch_axis` defines the channel being used to compute per channel min max stats
30
+
31
+ * :attr:`min_val` defines the per channel minimum values passed through
32
+
33
+ * :attr:`max_val` defines the per channel maximum values passed through
34
+
35
+ * :attr:`comp_percentile` defines comparison percentile to find outliers
36
+
37
+ * :attr:`average_percentile_ratio` defines the per channel average percentile ratios
38
+
39
+ * :attr:`percentile_batches_tracked` defines the number of percentile batches tracked for each channel
40
+
41
+ * :attr:`constant_channels` defines the number of batches that aren't constant channels per channel
42
+
43
+ Note: this tool is meant for FX Graph Mode Quantization
44
+ """
45
+
46
+ epoch_activation_min: torch.Tensor
47
+ epoch_activation_max: torch.Tensor
48
+ min_val: torch.Tensor
49
+ max_val: torch.Tensor
50
+ comp_percentile: torch.Tensor
51
+ average_percentile_ratio: torch.Tensor
52
+ percentile_batches_tracked: torch.Tensor
53
+ constant_channels: torch.Tensor
54
+
55
+ def __init__(self, ch_axis: int = 1, comp_percentile: float = 0.9):
56
+ super().__init__(torch.qint8)
57
+ self.num_batches_tracked = 0
58
+
59
+ # keep track of the min and mix of the range for average batch and epoch as a whole
60
+ self.average_batch_activation_range: torch.Tensor = torch.tensor(float(0))
61
+ self.register_buffer("epoch_activation_min", torch.tensor(float("inf")))
62
+ self.register_buffer("epoch_activation_max", torch.tensor(float("-inf")))
63
+
64
+ # keep track of per channel min max information using the given channel
65
+ self.ch_axis: int = ch_axis
66
+ self.register_buffer("min_val", torch.tensor([]))
67
+ self.register_buffer("max_val", torch.tensor([]))
68
+
69
+ # keep track of percentile ratio information per channel
70
+ self.register_buffer("comp_percentile", torch.tensor([comp_percentile]))
71
+ self.register_buffer("average_percentile_ratio", torch.tensor([]))
72
+ self.register_buffer("percentile_batches_tracked", torch.tensor([]))
73
+ self.register_buffer("constant_channels", torch.tensor([]))
74
+
75
+ def forward(self, x):
76
+ x_copy = x.detach() # avoid keeping autograd tape
77
+ x_copy = x_copy.to(self.epoch_activation_min.dtype)
78
+
79
+ x_copy = self._calculate_range_stats(x_copy)
80
+ x_copy = self._calculate_min_max_stats(x_copy)
81
+ x_copy = self._calculate_percentile_stats(x_copy)
82
+
83
+ # return the passed in the value
84
+ return x
85
+
86
+ def _calculate_range_stats(self, x_copy):
87
+ r"""Calculates and stores range stats with forward values.
88
+
89
+ Args
90
+ x_copy: A copy of the forward data
91
+
92
+ Returns the passed in x_copy
93
+ """
94
+ # get the min, max values of the data
95
+ min_val_cur, max_val_cur = torch.aminmax(x_copy)
96
+
97
+ # calculate new epoch range values
98
+ epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)
99
+ epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)
100
+
101
+ self.epoch_activation_min.copy_(epoch_min_val)
102
+ self.epoch_activation_max.copy_(epoch_max_val)
103
+
104
+ # calculate the average batch activation range
105
+ current_batch_range = max_val_cur - min_val_cur
106
+ new_range = (
107
+ self.average_batch_activation_range * self.num_batches_tracked
108
+ + current_batch_range
109
+ ) / (self.num_batches_tracked + 1)
110
+
111
+ self.average_batch_activation_range = new_range
112
+ self.num_batches_tracked += 1 # new batch was processed
113
+
114
+ return x_copy
115
+
116
+ def _calculate_min_max_stats(self, x_copy):
117
+ r"""Calculates and stores the per_channel min, max stats with forward values.
118
+ Does calculation based on channel axis: self.ch_axis
119
+
120
+ Args
121
+ x_copy: A copy of the forward data
122
+
123
+ Returns the passed in x_copy
124
+ """
125
+ # get the current min and max vals
126
+ min_val = self.min_val
127
+ max_val = self.max_val
128
+ x_dim = x_copy.size()
129
+
130
+ new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
131
+ new_axis_list[self.ch_axis] = 0
132
+ new_axis_list[0] = self.ch_axis
133
+ y = x_copy.permute(new_axis_list)
134
+ # Need to match dtype of min/max because the updates to buffers
135
+ # are done in place and types need to match for comparisons
136
+ y = y.to(self.min_val.dtype)
137
+ y = torch.flatten(y, start_dim=1)
138
+ if min_val.numel() == 0 or max_val.numel() == 0:
139
+ min_val, max_val = torch.aminmax(y, dim=1)
140
+ else:
141
+ min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
142
+ min_val = torch.min(min_val_cur, min_val)
143
+ max_val = torch.max(max_val_cur, max_val)
144
+
145
+ self.min_val.resize_(min_val.shape)
146
+ self.max_val.resize_(max_val.shape)
147
+ self.min_val.copy_(min_val)
148
+ self.max_val.copy_(max_val)
149
+
150
+ return x_copy
151
+
152
+ def _calculate_percentile_stats(self, x_copy):
153
+ r"""Calculates and stores the per_channel percentile stats with forward values.
154
+ Does calculation based on channel axis: self.ch_axis
155
+
156
+ Args
157
+ x_copy: A copy of the forward data
158
+
159
+ Returns the passed in x_copy
160
+ """
161
+ # get the dimension of the copy
162
+ x_dim = x_copy.size()
163
+
164
+ new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
165
+ new_axis_list[self.ch_axis] = 0
166
+ new_axis_list[0] = self.ch_axis
167
+ y = x_copy.permute(new_axis_list)
168
+ # Need to match dtype of min/max because the updates to buffers
169
+ # are done in place and types need to match for comparisons
170
+ y = y.to(self.min_val.dtype)
171
+ y = torch.flatten(y, start_dim=1)
172
+ y = y.to(dtype=self.min_val.dtype, device="cpu")
173
+
174
+ # find the percentile values along the axis
175
+ # we want both 100th percentile and comp_percentile
176
+ # we also want to find 0th quartile to see if we have constant channel
177
+ quantiles_list = [0, self.comp_percentile, 1.00]
178
+ quantiles_to_find = torch.tensor(quantiles_list, dtype=self.min_val.dtype)
179
+
180
+ # find the quantiles
181
+ desired_quantiles = torch.quantile(y, quantiles_to_find, dim=self.ch_axis, interpolation="lower")
182
+ zero_quantile = desired_quantiles[0]
183
+ comp_quantile = desired_quantiles[1]
184
+ hundreth_quartile = desired_quantiles[2]
185
+
186
+ # if any of the channels have 0s, we ignore that channel for this calculation
187
+ any_non_zero_quantile_value: torch.Tensor = (comp_quantile != torch.tensor([0])) | (hundreth_quartile != torch.tensor([0]))
188
+ any_non_zero_quantile_value = any_non_zero_quantile_value.int() # transform boolean values to int values
189
+
190
+ # we also check if we have a constant channel
191
+ any_constant_channels: torch.Tensor = (hundreth_quartile - zero_quantile) == torch.tensor([0])
192
+ any_constant_channels = any_constant_channels.int() # transform boolean values to int values
193
+
194
+ # possibilities to get nan as an answer
195
+ # will ignore any of these three cases with 0s and just not deal with them for now
196
+ # case (1) 0 in numerator: issue if 0 is largest, all negative, and rest are really negative
197
+ # case (2) 0 in denominator: is possible unless case 3, we just ignore
198
+ # case (3) 0 in both: not outlier, channel just kinda useless, ignore
199
+
200
+ # get the ratio and get rid of nan values
201
+ quantile_ratios = hundreth_quartile / comp_quantile
202
+ quantile_ratios = torch.nan_to_num(quantile_ratios)
203
+ # update averages, remembering to only update if didn't have zeros
204
+ ratio_if_not_zero = any_non_zero_quantile_value * quantile_ratios
205
+
206
+ # if num_batches and average_ratio are not initialized, we want to initialize them
207
+ if self.percentile_batches_tracked.shape[0] == 0 or self.average_percentile_ratio.shape[0] == 0:
208
+ self.percentile_batches_tracked = torch.zeros_like(any_non_zero_quantile_value)
209
+ self.average_percentile_ratio = torch.zeros_like(ratio_if_not_zero)
210
+
211
+ # also initialize the constant channel var if that is not initialized separately
212
+ if self.constant_channels.shape[0] == 0:
213
+ self.constant_channels = torch.zeros_like(any_constant_channels)
214
+
215
+ # get current num batches and average ratio
216
+ num_batches = self.percentile_batches_tracked
217
+ average_ratio = self.average_percentile_ratio
218
+
219
+ # calculate new_number of batches, new_ratios, and get rid of nans because of 0 size batches
220
+ new_number_of_batches: torch.Tensor = num_batches + any_non_zero_quantile_value
221
+ new_ratios: torch.Tensor = ((average_ratio * num_batches) + ratio_if_not_zero) / new_number_of_batches
222
+ new_ratios = torch.nan_to_num(new_ratios)
223
+
224
+ # update the number of non-constant channels
225
+ new_constant_count: torch.Tensor = self.constant_channels + any_constant_channels
226
+
227
+ # update the values locally
228
+ self.percentile_batches_tracked.copy_(new_number_of_batches)
229
+ self.average_percentile_ratio.copy_(new_ratios)
230
+ self.constant_channels.copy_(new_constant_count)
231
+
232
+ return x_copy
233
+
234
+ @torch.jit.export
235
+ def get_batch_to_epoch_ratio(self):
236
+ epoch_activation_range = self.epoch_activation_max - self.epoch_activation_min
237
+
238
+ if epoch_activation_range == torch.tensor(float(0)):
239
+ raise ValueError("Range for Epoch is 0")
240
+ elif epoch_activation_range == torch.tensor(float("inf")):
241
+ raise ValueError(
242
+ "No data has been run through observer or infinity value present"
243
+ )
244
+ else:
245
+ return self.average_batch_activation_range / epoch_activation_range
246
+
247
    @torch.jit.export
    def reset_batch_and_epoch_values(self):
        """Reset all tracked statistics to their initial defaults for a new epoch.

        Fresh tensors are created on the device the observer currently lives on.
        Note: the ``comp_percentile`` buffer (configuration, not a statistic) is
        not reset here.
        """
        # set all the values back to their original defaults for a new epoch
        # keep device
        device = self.max_val.device
        self.num_batches_tracked = 0
        self.average_batch_activation_range = torch.tensor(float(0), device=device)
        self.epoch_activation_min = torch.tensor(float("inf"), device=device)
        self.epoch_activation_max = torch.tensor(float("-inf"), device=device)
        self.min_val = torch.tensor([], device=device)
        self.max_val = torch.tensor([], device=device)
        self.average_percentile_ratio = torch.tensor([], device=device)
        self.percentile_batches_tracked = torch.tensor([], device=device)
        self.constant_channels = torch.tensor([], device=device)
261
+
262
+ @torch.jit.export
263
+ def calculate_qparams(self):
264
+ raise Exception( # noqa: TRY002
265
+ "calculate_qparams should not be called for ModelReportObserver"
266
+ )
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_model_report/model_report_visualizer.py ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from typing import Any, Set, Dict, List, Tuple, OrderedDict
4
+ from collections import OrderedDict as OrdDict
5
+
6
+ # try to import tablate
7
+ got_tabulate = True
8
+ try:
9
+ from tabulate import tabulate
10
+ except ImportError:
11
+ got_tabulate = False
12
+
13
+
14
+ # var to see if we could import matplotlib
15
+ got_matplotlib = True
16
+ try:
17
+ import matplotlib.pyplot as plt
18
+ except ImportError:
19
+ got_matplotlib = False
20
+
21
+ class ModelReportVisualizer:
22
+ r"""
23
+ The ModelReportVisualizer class aims to provide users a way to visualize some of the statistics
24
+ that were generated by the ModelReport API. However, at a higher level, the class aims to provide
25
+ some level of visualization of statistics to PyTorch in order to make it easier to parse data and
26
+ diagnose any potential issues with data or a specific model. With respect to the visualizations,
27
+ the ModelReportVisualizer class currently supports several methods of visualizing data.
28
+
29
+ Supported Visualization Methods Include:
30
+ - Table format
31
+ - Plot format (line graph)
32
+ - Histogram format
33
+
34
+ For all of the existing visualization methods, there is the option to filter data based on:
35
+ - A module fqn prefix
36
+ - Feature [required for the plot and histogram]
37
+
38
+ * :attr:`generated_reports` The reports generated by the ModelReport class in the structure below
39
+ Ensure sure that features that are the same across different report contain the same name
40
+ Ensure that objects representing the same features are the same type / dimension (where applicable)
41
+
42
+ Note:
43
+ Currently, the ModelReportVisualizer class supports visualization of data generated by the
44
+ ModelReport class. However, this structure is extensible and should allow the visualization of
45
+ other information as long as the information is structured in the following general format:
46
+
47
+ Report Structure
48
+ -- module_fqn [module with attached detectors]
49
+ |
50
+ -- feature keys [not every detector extracts same information]
51
+ [same collected info has same keys, unless can be specific to detector]
52
+
53
+
54
+ The goal behind the class is that the generated visualizations can be used in conjunction with the generated
55
+ report for people to get a better understanding of issues and what the fix might be. It is also just to provide
56
+ a good visualization platform, since it might be hard to parse through the ModelReport returned dictionary as
57
+ that grows in size.
58
+
59
+ General Use Flow Expected
60
+ 1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects
61
+ 2.) Prepare your model with prepare_fx
62
+ 3.) Call model_report.prepare_detailed_calibration on your model to add relevant observers
63
+ 4.) Callibrate your model with data
64
+ 5.) Call model_report.generate_report on your model to generate report and optionally remove added observers
65
+ 6.) Use output of model_report.generate_report to initialize ModelReportVisualizer instance
66
+ 7.) Use instance to view different views of data as desired, applying filters as needed
67
+ 8.) Either see the super detailed information or just the actual printed or shown table / plot / histogram
68
+
69
+ """
70
+
71
    # keys for table dict returned by generate_filtered_tables()
    TABLE_TENSOR_KEY = "tensor_level_info"
    TABLE_CHANNEL_KEY = "channel_level_info"

    # Constants for header vals
    # number of leading non-feature columns in each table:
    # tensor rows start with [idx, layer_fqn], channel rows with
    # [idx, layer_fqn, channel]
    NUM_NON_FEATURE_TENSOR_HEADERS = 2
    NUM_NON_FEATURE_CHANNEL_HEADERS = 3

    # Constants for row index in header
    # column position of the channel number within a channel-table row
    CHANNEL_NUM_INDEX = 2
82
    def __init__(self, generated_reports: OrderedDict[str, Any]):
        r"""
        Initializes the ModelReportVisualizer instance with the necessary reports.

        Args:
            generated_reports (OrderedDict[str, Any]): The reports generated by the ModelReport class
                can also be a dictionary generated in another manner, as long as format is same
                (module fqn -> feature name -> value)
        """
        # stored as-is; every view / filter method below reads from this mapping
        self.generated_reports = generated_reports
91
+
92
+ def get_all_unique_module_fqns(self) -> Set[str]:
93
+ r"""
94
+ The purpose of this method is to provide a user the set of all module_fqns so that if
95
+ they wish to use some of the filtering capabilities of the ModelReportVisualizer class,
96
+ they don't need to manually parse the generated_reports dictionary to get this information.
97
+
98
+ Returns all the unique module fqns present in the reports the ModelReportVisualizer
99
+ instance was initialized with.
100
+ """
101
+ # returns the keys of the ordered dict
102
+ return set(self.generated_reports.keys())
103
+
104
+ def get_all_unique_feature_names(self, plottable_features_only: bool = True) -> Set[str]:
105
+ r"""
106
+ The purpose of this method is to provide a user the set of all feature names so that if
107
+ they wish to use the filtering capabilities of the generate_table_view(), or use either of
108
+ the generate_plot_view() or generate_histogram_view(), they don't need to manually parse
109
+ the generated_reports dictionary to get this information.
110
+
111
+ Args:
112
+ plottable_features_only (bool): True if the user is only looking for plottable features,
113
+ False otherwise
114
+ plottable features are those that are tensor values
115
+ Default: True (only return those feature names that are plottable)
116
+
117
+ Returns all the unique module fqns present in the reports the ModelReportVisualizer
118
+ instance was initialized with.
119
+ """
120
+ unique_feature_names = set()
121
+ for module_fqn in self.generated_reports:
122
+ # get dict of the features
123
+ feature_dict: Dict[str, Any] = self.generated_reports[module_fqn]
124
+
125
+ # loop through features
126
+ for feature_name in feature_dict:
127
+ # if we need plottable, ensure type of val is tensor
128
+ if not plottable_features_only or type(feature_dict[feature_name]) == torch.Tensor:
129
+ unique_feature_names.add(feature_name)
130
+
131
+ # return our compiled set of unique feature names
132
+ return unique_feature_names
133
+
134
+ def _get_filtered_data(self, feature_filter: str, module_fqn_filter: str) -> OrderedDict[str, Any]:
135
+ r"""
136
+ Filters the data and returns it in the same ordered dictionary format so the relevant views can be displayed.
137
+
138
+ Args:
139
+ feature_filter (str): The feature filter, if we want to filter the set of data to only include
140
+ a certain set of features that include feature_filter
141
+ If feature = "", then we do not filter based on any features
142
+ module_fqn_filter (str): The filter on prefix for the module fqn. All modules that have fqn with
143
+ this prefix will be included
144
+ If module_fqn_filter = "" we do not filter based on module fqn, and include all modules
145
+
146
+ First, the data is filtered based on module_fqn, and then filtered based on feature
147
+ Returns an OrderedDict (sorted in order of model) mapping:
148
+ module_fqns -> feature_names -> values
149
+ """
150
+ # create return dict
151
+ filtered_dict: OrderedDict[str, Any] = OrdDict()
152
+
153
+ for module_fqn in self.generated_reports:
154
+ # first filter based on module
155
+ if module_fqn_filter == "" or module_fqn_filter in module_fqn:
156
+ # create entry for module and loop through features
157
+ filtered_dict[module_fqn] = {}
158
+ module_reports = self.generated_reports[module_fqn]
159
+ for feature_name in module_reports:
160
+ # check if filtering on features and do so if desired
161
+ if feature_filter == "" or feature_filter in feature_name:
162
+ filtered_dict[module_fqn][feature_name] = module_reports[feature_name]
163
+
164
+ # we have populated the filtered dict, and must return it
165
+
166
+ return filtered_dict
167
+
168
+ def _generate_tensor_table(
169
+ self,
170
+ filtered_data: OrderedDict[str, Dict[str, Any]],
171
+ tensor_features: List[str]
172
+ ) -> Tuple[List, List]:
173
+ r"""
174
+ Takes in the filtered data and features list and generates the tensor headers and table
175
+
176
+ Currently meant to generate the headers and table for both the tensor information.
177
+
178
+ Args:
179
+ filtered_data (OrderedDict[str, Dict[str, Any]]): An OrderedDict (sorted in order of model) mapping:
180
+ module_fqns -> feature_names -> values
181
+ tensor_features (List[str]): A list of the tensor level features
182
+
183
+ Returns a tuple with:
184
+ A list of the headers of the tensor table
185
+ A list of lists containing the table information row by row
186
+ The 0th index row will contain the headers of the columns
187
+ The rest of the rows will contain data
188
+ """
189
+ # now we compose the tensor information table
190
+ tensor_table: List[List[Any]] = []
191
+ tensor_headers: List[str] = []
192
+
193
+ # append the table row to the table only if we have features
194
+ if len(tensor_features) > 0:
195
+ # now we add all the data
196
+ for index, module_fqn in enumerate(filtered_data):
197
+ # we make a new row for the tensor table
198
+ tensor_table_row = [index, module_fqn]
199
+ for feature in tensor_features:
200
+ # we iterate in same order of added features
201
+
202
+ if feature in filtered_data[module_fqn]:
203
+ # add value if applicable to module
204
+ feature_val = filtered_data[module_fqn][feature]
205
+ else:
206
+ # add that it is not applicable
207
+ feature_val = "Not Applicable"
208
+
209
+ # if it's a tensor we want to extract val
210
+ if isinstance(feature_val, torch.Tensor):
211
+ feature_val = feature_val.item()
212
+
213
+ # we add to our list of values
214
+ tensor_table_row.append(feature_val)
215
+
216
+ tensor_table.append(tensor_table_row)
217
+
218
+ # add row of headers of we actually have something, otherwise just empty
219
+ if len(tensor_table) != 0:
220
+ tensor_headers = ["idx", "layer_fqn"] + tensor_features
221
+
222
+ return (tensor_headers, tensor_table)
223
+
224
+ def _generate_channels_table(
225
+ self,
226
+ filtered_data: OrderedDict[str, Any],
227
+ channel_features: List[str],
228
+ num_channels: int
229
+ ) -> Tuple[List, List]:
230
+ r"""
231
+ Takes in the filtered data and features list and generates the channels headers and table
232
+
233
+ Currently meant to generate the headers and table for both the channels information.
234
+
235
+ Args:
236
+ filtered_data (OrderedDict[str, Any]): An OrderedDict (sorted in order of model) mapping:
237
+ module_fqns -> feature_names -> values
238
+ channel_features (List[str]): A list of the channel level features
239
+ num_channels (int): Number of channels in the channel data
240
+
241
+ Returns a tuple with:
242
+ A list of the headers of the channel table
243
+ A list of lists containing the table information row by row
244
+ The 0th index row will contain the headers of the columns
245
+ The rest of the rows will contain data
246
+ """
247
+ # now we compose the table for the channel information table
248
+ channel_table: List[List[Any]] = []
249
+ channel_headers: List[str] = []
250
+
251
+ # counter to keep track of number of entries in
252
+ channel_table_entry_counter: int = 0
253
+
254
+ if len(channel_features) > 0:
255
+ # now we add all channel data
256
+ for module_fqn in filtered_data:
257
+ # we iterate over all channels
258
+ for channel in range(num_channels):
259
+ # we make a new row for the channel
260
+ new_channel_row = [channel_table_entry_counter, module_fqn, channel]
261
+ for feature in channel_features:
262
+ if feature in filtered_data[module_fqn]:
263
+ # add value if applicable to module
264
+ feature_val = filtered_data[module_fqn][feature][channel]
265
+ else:
266
+ # add that it is not applicable
267
+ feature_val = "Not Applicable"
268
+
269
+ # if it's a tensor we want to extract val
270
+ if type(feature_val) is torch.Tensor:
271
+ feature_val = feature_val.item()
272
+
273
+ # add value to channel specific row
274
+ new_channel_row.append(feature_val)
275
+
276
+ # add to table and increment row index counter
277
+ channel_table.append(new_channel_row)
278
+ channel_table_entry_counter += 1
279
+
280
+ # add row of headers of we actually have something, otherwise just empty
281
+ if len(channel_table) != 0:
282
+ channel_headers = ["idx", "layer_fqn", "channel"] + channel_features
283
+
284
+ return (channel_headers, channel_table)
285
+
286
+ def generate_filtered_tables(self, feature_filter: str = "", module_fqn_filter: str = "") -> Dict[str, Tuple[List, List]]:
287
+ r"""
288
+ Takes in optional filter values and generates two tables with desired information.
289
+
290
+ The generated tables are presented in both a list-of-lists format
291
+
292
+ The reason for the two tables are that they handle different things:
293
+ 1.) the first table handles all tensor level information
294
+ 2.) the second table handles and displays all channel based information
295
+
296
+ The reasoning for this is that having all the info in one table can make it ambiguous which collected
297
+ statistics are global, and which are actually per-channel, so it's better to split it up into two
298
+ tables. This also makes the information much easier to digest given the plethora of statistics collected
299
+
300
+ Tensor table columns:
301
+ idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
302
+ ---- --------- --------- --------- --------- ---------
303
+
304
+ Per-Channel table columns:
305
+ idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
306
+ ---- --------- ------- --------- --------- --------- ---------
307
+
308
+ Args:
309
+ feature_filter (str, optional): Filters the features presented to only those that
310
+ contain this filter substring
311
+ Default = "", results in all the features being printed
312
+ module_fqn_filter (str, optional): Only includes modules that contains this string
313
+ Default = "", results in all the modules in the reports to be visible in the table
314
+
315
+ Returns a dictionary with two keys:
316
+ (Dict[str, Tuple[List, List]]) A dict containing two keys:
317
+ "tensor_level_info", "channel_level_info"
318
+ Each key maps to a tuple with:
319
+ A list of the headers of each table
320
+ A list of lists containing the table information row by row
321
+ The 0th index row will contain the headers of the columns
322
+ The rest of the rows will contain data
323
+
324
+ Example Use:
325
+ >>> # xdoctest: +SKIP("undefined variables")
326
+ >>> mod_report_visualizer.generate_filtered_tables(
327
+ ... feature_filter = "per_channel_min",
328
+ ... module_fqn_filter = "block1"
329
+ ... ) # generates table with per_channel_min info for all modules in block 1 of the model
330
+ """
331
+ # first get the filtered data
332
+ filtered_data: OrderedDict[str, Any] = self._get_filtered_data(feature_filter, module_fqn_filter)
333
+
334
+ # now we split into tensor and per-channel data
335
+ tensor_features: Set[str] = set()
336
+ channel_features: Set[str] = set()
337
+
338
+ # keep track of the number of channels we have
339
+ num_channels: int = 0
340
+
341
+ for module_fqn in filtered_data:
342
+ for feature_name in filtered_data[module_fqn]:
343
+ # get the data for that specific feature
344
+ feature_data = filtered_data[module_fqn][feature_name]
345
+
346
+ # check if not zero dim tensor
347
+ is_tensor: bool = isinstance(feature_data, torch.Tensor)
348
+ is_not_zero_dim: bool = is_tensor and len(feature_data.shape) != 0
349
+
350
+ if is_not_zero_dim or isinstance(feature_data, list):
351
+ # works means per channel
352
+ channel_features.add(feature_name)
353
+ num_channels = len(feature_data)
354
+ else:
355
+ # means is per-tensor
356
+ tensor_features.add(feature_name)
357
+
358
+ # we make them lists for iteration purposes
359
+ tensor_features_list: List[str] = sorted(tensor_features)
360
+ channel_features_list: List[str] = sorted(channel_features)
361
+
362
+ # get the tensor info
363
+ tensor_headers, tensor_table = self._generate_tensor_table(filtered_data, tensor_features_list)
364
+
365
+ # get the channel info
366
+ channel_headers, channel_table = self._generate_channels_table(
367
+ filtered_data, channel_features_list, num_channels
368
+ )
369
+
370
+ # let's now create the dictionary to return
371
+ table_dict = {
372
+ self.TABLE_TENSOR_KEY : (tensor_headers, tensor_table),
373
+ self.TABLE_CHANNEL_KEY : (channel_headers, channel_table)
374
+ }
375
+
376
+ # return the two tables
377
+ return table_dict
378
+
379
+ def generate_table_visualization(self, feature_filter: str = "", module_fqn_filter: str = ""):
380
+ r"""
381
+ Takes in optional filter values and prints out formatted tables of the information.
382
+
383
+ The reason for the two tables printed out instead of one large one are that they handle different things:
384
+ 1.) the first table handles all tensor level information
385
+ 2.) the second table handles and displays all channel based information
386
+
387
+ The reasoning for this is that having all the info in one table can make it ambiguous which collected
388
+ statistics are global, and which are actually per-channel, so it's better to split it up into two
389
+ tables. This also makes the information much easier to digest given the plethora of statistics collected
390
+
391
+ Tensor table columns:
392
+ idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
393
+ ---- --------- --------- --------- --------- ---------
394
+
395
+ Per-Channel table columns:
396
+
397
+ idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
398
+ ---- --------- ------- --------- --------- --------- ---------
399
+
400
+ Args:
401
+ feature_filter (str, optional): Filters the features presented to only those that
402
+ contain this filter substring
403
+ Default = "", results in all the features being printed
404
+ module_fqn_filter (str, optional): Only includes modules that contains this string
405
+ Default = "", results in all the modules in the reports to be visible in the table
406
+
407
+ Example Use:
408
+ >>> # xdoctest: +SKIP("undefined variables")
409
+ >>> mod_report_visualizer.generate_table_visualization(
410
+ ... feature_filter = "per_channel_min",
411
+ ... module_fqn_filter = "block1"
412
+ ... )
413
+ >>> # prints out neatly formatted table with per_channel_min info
414
+ >>> # for all modules in block 1 of the model
415
+ """
416
+ # see if we got tabulate
417
+ if not got_tabulate:
418
+ print("Make sure to install tabulate and try again.")
419
+ return None
420
+
421
+ # get the table dict and the specific tables of interest
422
+ table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
423
+ tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
424
+ channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]
425
+
426
+ # get the table string and print it out
427
+ # now we have populated the tables for each one
428
+ # let's create the strings to be returned
429
+ table_str = ""
430
+ # the tables will have some headers columns that are non-feature
431
+ # ex. table index, module name, channel index, etc.
432
+ # we want to look at header columns for features, that come after those headers
433
+ if len(tensor_headers) > self.NUM_NON_FEATURE_TENSOR_HEADERS:
434
+ # if we have at least one tensor level feature to be added we add tensor table
435
+ table_str += "Tensor Level Information \n"
436
+ table_str += tabulate(tensor_table, headers=tensor_headers)
437
+ if len(channel_headers) > self.NUM_NON_FEATURE_CHANNEL_HEADERS:
438
+ # if we have at least one channel level feature to be added we add tensor table
439
+ table_str += "\n\n Channel Level Information \n"
440
+ table_str += tabulate(channel_table, headers=channel_headers)
441
+
442
+ # if no features at all, let user know
443
+ if table_str == "":
444
+ table_str = "No data points to generate table with."
445
+
446
+ print(table_str)
447
+
448
    def _get_plottable_data(self, feature_filter: str, module_fqn_filter: str) -> Tuple[List, List[List], bool]:
        r"""
        Takes in the feature filters and module filters and outputs the x and y data for plotting

        Args:
            feature_filter (str): Filters the features presented to only those that
                contain this filter substring
            module_fqn_filter (str): Only includes modules that contains this string

        Returns a tuple of three elements
            The first is a list containing relevant x-axis data
            The second is a list containing the corresponding y-axis data
            If the data is per channel

        Raises:
            ValueError: if the filters select anything other than exactly one feature
        """
        # get the table dict and the specific tables of interest
        table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
        tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
        channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]

        # make sure it is only 1 feature that is being plotted
        # get the number of features in each of these
        tensor_info_features_count = len(tensor_headers) - ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
        channel_info_features_count = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS

        # see if valid tensor or channel plot
        is_valid_per_tensor_plot: bool = tensor_info_features_count == 1
        is_valid_per_channel_plot: bool = channel_info_features_count == 1

        # offset should either be one of tensor or channel table or neither
        feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
        table = tensor_table

        # if a per_channel plot, we have different offset and table
        if is_valid_per_channel_plot:
            feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
            table = channel_table

        x_data: List = []
        y_data: List[List] = []
        # the feature will either be a tensor feature or channel feature
        if is_valid_per_tensor_plot:
            # per-tensor: one (x, y) point per table row; rows whose value is a
            # string ("Not Applicable") are skipped entirely
            for table_row_num, row in enumerate(table):
                # get x_value to append
                x_val_to_append = table_row_num
                # the index of the feature will the 0 + num non feature columns
                tensor_feature_index = feature_column_offset
                row_value = row[tensor_feature_index]
                if not type(row_value) == str:
                    x_data.append(x_val_to_append)
                    y_data.append(row_value)
        elif is_valid_per_channel_plot:
            # gather the x_data and multiple y_data
            # calculate the number of channels
            num_channels: int = max(row[self.CHANNEL_NUM_INDEX] for row in table) + 1
            for channel in range(num_channels):
                y_data.append([])  # separate data list per channel

            for table_row_num, row in enumerate(table):
                # get x_value to append
                x_val_to_append = table_row_num
                current_channel = row[self.CHANNEL_NUM_INDEX]  # initially chose current channel
                # channel rows are grouped per module (num_channels rows each), so
                # the x value is the module index rather than the raw row number
                new_module_index: int = table_row_num // num_channels
                x_val_to_append = new_module_index

                # the index of the feature will the 0 + num non feature columns
                tensor_feature_index = feature_column_offset
                row_value = row[tensor_feature_index]
                if not type(row_value) == str:
                    # only append if new index we are appending
                    if len(x_data) == 0 or x_data[-1] != x_val_to_append:
                        x_data.append(x_val_to_append)

                    # append value for that channel
                    y_data[current_channel].append(row_value)
        else:
            # more than one feature was chosen
            error_str = "Make sure to pick only a single feature with your filter to plot a graph."
            error_str += " We recommend calling get_all_unique_feature_names() to find unique feature names."
            error_str += " Pick one of those features to plot."
            raise ValueError(error_str)

        # return x, y values, and if data is per-channel
        return (x_data, y_data, is_valid_per_channel_plot)
531
+
532
+ def generate_plot_visualization(self, feature_filter: str, module_fqn_filter: str = ""):
533
+ r"""
534
+ Takes in a feature and optional module_filter and plots of the desired data.
535
+
536
+ For per channel features, it averages the value across the channels and plots a point
537
+ per module. The reason for this is that for models with hundreds of channels, it can
538
+ be hard to differentiate one channel line from another, and so the point of generating
539
+ a single average point per module is to give a sense of general trends that encourage
540
+ further deep dives.
541
+
542
+ Note:
543
+ Only features in the report that have tensor value data are plottable by this class
544
+ When the tensor information is plotted, it will plot:
545
+ idx as the x val, feature value as the y_val
546
+ When the channel information is plotted, it will plot:
547
+ the first idx of each module as the x val, feature value as the y_val [for each channel]
548
+ The reason for this is that we want to be able to compare values across the
549
+ channels for same layer, and it will be hard if values are staggered by idx
550
+ This means each module is represented by only 1 x value
551
+ Args:
552
+ feature_filter (str): Filters the features presented to only those that
553
+ contain this filter substring
554
+ module_fqn_filter (str, optional): Only includes modules that contains this string
555
+ Default = "", results in all the modules in the reports to be visible in the table
556
+
557
+ Example Use:
558
+ >>> # xdoctest: +SKIP("undefined variables")
559
+ >>> mod_report_visualizer.generate_plot_visualization(
560
+ ... feature_filter = "per_channel_min",
561
+ ... module_fqn_filter = "block1"
562
+ ... )
563
+ >>> # outputs line plot of per_channel_min information for all
564
+ >>> # modules in block1 of model each channel gets it's own line,
565
+ >>> # and it's plotted across the in-order modules on the x-axis
566
+ """
567
+ # checks if we have matplotlib and let's user know to install it if don't
568
+ if not got_matplotlib:
569
+ print("make sure to install matplotlib and try again.")
570
+ return None
571
+
572
+ # get the x and y data and if per channel
573
+ x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter)
574
+
575
+ # plot based on whether data is per channel or not
576
+ ax = plt.subplot()
577
+ ax.set_ylabel(feature_filter)
578
+ ax.set_title(feature_filter + " Plot")
579
+ plt.xticks(x_data) # only show ticks for actual points
580
+
581
+ if data_per_channel:
582
+ ax.set_xlabel("First idx of module")
583
+ # set the legend as well
584
+ # plot a single line that is average of the channel values
585
+ num_modules = len(y_data[0]) # all y_data have same length, so get num modules
586
+ num_channels = len(y_data) # we want num channels to be able to calculate average later
587
+
588
+ avg_vals = [sum(y_data[:][index]) / num_channels for index in range(num_modules)]
589
+
590
+ # plot the three things we measured
591
+ ax.plot(x_data, avg_vals, label=f"Average Value Across {num_channels} Channels")
592
+ ax.legend(loc='upper right')
593
+ else:
594
+ ax.set_xlabel("idx")
595
+ ax.plot(x_data, y_data)
596
+
597
+ # actually show the plot
598
+ plt.show()
599
+
600
    def generate_histogram_visualization(self, feature_filter: str, module_fqn_filter: str = "", num_bins: int = 10) -> None:
        r"""
        Takes in a feature and optional module_filter and plots the histogram of desired data.

        Note:
            Only features in the report that have tensor value data can be viewed as a histogram
            If you want to plot a histogram from all the channel values of a specific feature for
            a specific model, make sure to specify both the model and the feature properly
            in the filters and you should be able to see a distribution of the channel data

        Args:
            feature_filter (str, optional): Filters the features presented to only those that
                contain this filter substring
                Default = "", results in all the features being printed
            module_fqn_filter (str, optional): Only includes modules that contains this string
                Default = "", results in all the modules in the reports to be visible in the table
            num_bins (int, optional): The number of bins to create the histogram with
                Default = 10, the values will be split into 10 equal sized bins

        Example Use:
            >>> # xdoctest: +SKIP
            >>> mod_report_visualizer.generate_histogram_visualization(
            ...     feature_filter = "per_channel_min",
            ...     module_fqn_filter = "block1"
            ... )
            # outputs histogram of per_channel_min information for all modules in block1 of model
                information is gathered across all channels for all modules in block 1 for the
                per_channel_min and is displayed in a histogram of equally sized bins
        """
        # checks if we have matplotlib and lets user know to install it if we don't
        if not got_matplotlib:
            print("make sure to install matplotlib and try again.")
            return None

        # get the x and y data and whether the feature is recorded per channel;
        # only the y values matter for a histogram
        x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter)

        # for histogram, we just care about plotting the y data
        # plot based on whether data is per channel or not
        ax = plt.subplot()
        ax.set_xlabel(feature_filter)
        ax.set_ylabel("Frequency")
        ax.set_title(feature_filter + " Histogram")

        if data_per_channel:
            # flatten the per-channel series (y_data[channel] is a list of values)
            # into a single list so all channel values share one distribution
            all_data = []
            for channel_info in y_data:
                all_data.extend(channel_info)

            val, bins, _ = plt.hist(
                all_data,
                bins=num_bins,
                stacked=True,
                rwidth=0.8,
            )
            # place a tick at every bin edge so bin boundaries are readable
            plt.xticks(bins)
        else:
            val, bins, _ = plt.hist(
                y_data,
                bins=num_bins,
                stacked=False,
                rwidth=0.8,
            )
            plt.xticks(bins)

        plt.show()
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/custom_config.py ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from __future__ import annotations
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional, Tuple, Type
5
+
6
+ from torch.ao.quantization import QConfigMapping
7
+ from torch.ao.quantization.backend_config import BackendConfig
8
+ from torch.ao.quantization.quant_type import QuantType, _quant_type_from_str, _get_quant_type_to_str
9
+
10
+
11
+ __all__ = [
12
+ "ConvertCustomConfig",
13
+ "FuseCustomConfig",
14
+ "PrepareCustomConfig",
15
+ "StandaloneModuleConfigEntry",
16
+ ]
17
+
18
+
19
# TODO: replace all usages with these constants
# Dictionary keys for the legacy dict-based configuration format consumed and
# produced by the ``from_dict`` / ``to_dict`` methods of the config classes below.
STANDALONE_MODULE_NAME_DICT_KEY = "standalone_module_name"
STANDALONE_MODULE_CLASS_DICT_KEY = "standalone_module_class"
FLOAT_TO_OBSERVED_DICT_KEY = "float_to_observed_custom_module_class"
OBSERVED_TO_QUANTIZED_DICT_KEY = "observed_to_quantized_custom_module_class"
NON_TRACEABLE_MODULE_NAME_DICT_KEY = "non_traceable_module_name"
NON_TRACEABLE_MODULE_CLASS_DICT_KEY = "non_traceable_module_class"
INPUT_QUANTIZED_INDEXES_DICT_KEY = "input_quantized_idxs"
OUTPUT_QUANTIZED_INDEXES_DICT_KEY = "output_quantized_idxs"
PRESERVED_ATTRIBUTES_DICT_KEY = "preserved_attributes"
29
+
30
+
31
@dataclass
class StandaloneModuleConfigEntry:
    """Configuration for quantizing one standalone submodule independently of its parent."""
    # qconfig_mapping for the prepare function called in the submodule,
    # None means use qconfig from parent qconfig_mapping
    qconfig_mapping: Optional[QConfigMapping]
    # example inputs used to trace/prepare the standalone submodule
    example_inputs: Tuple[Any, ...]
    # PrepareCustomConfig for the submodule; None means use an empty config
    prepare_custom_config: Optional[PrepareCustomConfig]
    # BackendConfig for the submodule; None means inherit the parent's
    backend_config: Optional[BackendConfig]
39
+
40
+
41
class PrepareCustomConfig:
    """
    Custom configuration for :func:`~torch.ao.quantization.quantize_fx.prepare_fx` and
    :func:`~torch.ao.quantization.quantize_fx.prepare_qat_fx`.

    All setters return ``self`` so calls can be chained fluently.

    Example usage::

        prepare_custom_config = PrepareCustomConfig() \
            .set_standalone_module_name("module1", qconfig_mapping, example_inputs, \
                child_prepare_custom_config, backend_config) \
            .set_standalone_module_class(MyStandaloneModule, qconfig_mapping, example_inputs, \
                child_prepare_custom_config, backend_config) \
            .set_float_to_observed_mapping(FloatCustomModule, ObservedCustomModule) \
            .set_non_traceable_module_names(["module2", "module3"]) \
            .set_non_traceable_module_classes([NonTraceableModule1, NonTraceableModule2]) \
            .set_input_quantized_indexes([0]) \
            .set_output_quantized_indexes([0]) \
            .set_preserved_attributes(["attr1", "attr2"])
    """
    def __init__(self):
        # standalone submodules to prepare independently, keyed by fully qualified name
        self.standalone_module_names: Dict[str, StandaloneModuleConfigEntry] = {}
        # standalone submodules to prepare independently, keyed by module class
        self.standalone_module_classes: Dict[Type, StandaloneModuleConfigEntry] = {}
        # per-QuantType mapping from float custom module classes to observed classes
        self.float_to_observed_mapping: Dict[QuantType, Dict[Type, Type]] = {}
        # names/classes of modules that FX symbolic tracing must not trace into
        self.non_traceable_module_names: List[str] = []
        self.non_traceable_module_classes: List[Type] = []
        # graph input/output positions that are already quantized (default is fp32)
        self.input_quantized_indexes: List[int] = []
        self.output_quantized_indexes: List[int] = []
        # attributes to keep on the GraphModule even when unused in forward
        self.preserved_attributes: List[str] = []

    def __repr__(self):
        # only show attributes that were actually configured to keep the repr short
        dict_nonempty = {
            k: v for k, v in self.__dict__.items()
            if len(v) > 0
        }
        return f"PrepareCustomConfig({dict_nonempty})"

    def set_standalone_module_name(
            self,
            module_name: str,
            qconfig_mapping: Optional[QConfigMapping],
            example_inputs: Tuple[Any, ...],
            prepare_custom_config: Optional[PrepareCustomConfig],
            backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:
        """
        Set the configuration for running a standalone module identified by ``module_name``.

        If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead.
        If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used.
        If ``backend_config`` is None, the parent ``backend_config`` will be used instead.
        """
        self.standalone_module_names[module_name] = \
            StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
        return self

    def set_standalone_module_class(
            self,
            module_class: Type,
            qconfig_mapping: Optional[QConfigMapping],
            example_inputs: Tuple[Any, ...],
            prepare_custom_config: Optional[PrepareCustomConfig],
            backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:
        """
        Set the configuration for running a standalone module identified by ``module_class``.

        If ``qconfig_mapping`` is None, the parent ``qconfig_mapping`` will be used instead.
        If ``prepare_custom_config`` is None, an empty ``PrepareCustomConfig`` will be used.
        If ``backend_config`` is None, the parent ``backend_config`` will be used instead.
        """
        self.standalone_module_classes[module_class] = \
            StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
        return self

    def set_float_to_observed_mapping(
            self,
            float_class: Type,
            observed_class: Type,
            quant_type: QuantType = QuantType.STATIC) -> PrepareCustomConfig:
        """
        Set the mapping from a custom float module class to a custom observed module class.

        The observed module class must have a ``from_float`` class method that converts the float module class
        to the observed module class. This is currently only supported for static quantization.

        Raises:
            ValueError: if ``quant_type`` is anything other than ``QuantType.STATIC``.
        """
        if quant_type != QuantType.STATIC:
            raise ValueError("set_float_to_observed_mapping is currently only supported for static quantization")
        if quant_type not in self.float_to_observed_mapping:
            self.float_to_observed_mapping[quant_type] = {}
        self.float_to_observed_mapping[quant_type][float_class] = observed_class
        return self

    def set_non_traceable_module_names(self, module_names: List[str]) -> PrepareCustomConfig:
        """
        Set the modules that are not symbolically traceable, identified by name.
        """
        self.non_traceable_module_names = module_names
        return self

    def set_non_traceable_module_classes(self, module_classes: List[Type]) -> PrepareCustomConfig:
        """
        Set the modules that are not symbolically traceable, identified by class.
        """
        self.non_traceable_module_classes = module_classes
        return self

    def set_input_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig:
        """
        Set the indexes of the inputs of the graph that should be quantized.
        Inputs are otherwise assumed to be in fp32 by default instead.
        """
        self.input_quantized_indexes = indexes
        return self

    def set_output_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig:
        """
        Set the indexes of the outputs of the graph that should be quantized.
        Outputs are otherwise assumed to be in fp32 by default instead.
        """
        self.output_quantized_indexes = indexes
        return self

    def set_preserved_attributes(self, attributes: List[str]) -> PrepareCustomConfig:
        """
        Set the names of the attributes that will persist in the graph module even if they are not used in
        the model's ``forward`` method.
        """
        self.preserved_attributes = attributes
        return self

    # TODO: remove this
    @classmethod
    def from_dict(cls, prepare_custom_config_dict: Dict[str, Any]) -> PrepareCustomConfig:
        """
        Create a ``PrepareCustomConfig`` from a dictionary with the following items:

            "standalone_module_name": a list of (module_name, qconfig_mapping, example_inputs,
            child_prepare_custom_config, backend_config) tuples

            "standalone_module_class" a list of (module_class, qconfig_mapping, example_inputs,
            child_prepare_custom_config, backend_config) tuples

            "float_to_observed_custom_module_class": a nested dictionary mapping from quantization
            mode to an inner mapping from float module classes to observed module classes, e.g.
            {"static": {FloatCustomModule: ObservedCustomModule}}

            "non_traceable_module_name": a list of modules names that are not symbolically traceable
            "non_traceable_module_class": a list of module classes that are not symbolically traceable
            "input_quantized_idxs": a list of indexes of graph inputs that should be quantized
            "output_quantized_idxs": a list of indexes of graph outputs that should be quantized
            "preserved_attributes": a list of attributes that persist even if they are not used in ``forward``

        This function is primarily for backward compatibility and may be removed in the future.
        """
        # Each helper below accepts either an already-constructed config object,
        # a raw dict (converted via the class's own from_dict), or None.
        def _get_qconfig_mapping(obj: Any, dict_key: str) -> Optional[QConfigMapping]:
            """
            Convert the given object into a QConfigMapping if possible, else throw an exception.
            """
            if isinstance(obj, QConfigMapping) or obj is None:
                return obj
            if isinstance(obj, Dict):
                return QConfigMapping.from_dict(obj)
            raise ValueError(f"Expected QConfigMapping in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'")

        def _get_prepare_custom_config(obj: Any, dict_key: str) -> Optional[PrepareCustomConfig]:
            """
            Convert the given object into a PrepareCustomConfig if possible, else throw an exception.
            """
            if isinstance(obj, PrepareCustomConfig) or obj is None:
                return obj
            if isinstance(obj, Dict):
                return PrepareCustomConfig.from_dict(obj)
            raise ValueError(f"Expected PrepareCustomConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'")

        def _get_backend_config(obj: Any, dict_key: str) -> Optional[BackendConfig]:
            """
            Convert the given object into a BackendConfig if possible, else throw an exception.
            """
            if isinstance(obj, BackendConfig) or obj is None:
                return obj
            if isinstance(obj, Dict):
                return BackendConfig.from_dict(obj)
            raise ValueError(f"Expected BackendConfig in prepare_custom_config_dict[\"{dict_key}\"], got '{type(obj)}'")

        conf = cls()
        for (module_name, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\
                prepare_custom_config_dict.get(STANDALONE_MODULE_NAME_DICT_KEY, []):
            qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_NAME_DICT_KEY)
            prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_NAME_DICT_KEY)
            backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_NAME_DICT_KEY)
            conf.set_standalone_module_name(
                module_name, qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
        for (module_class, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\
                prepare_custom_config_dict.get(STANDALONE_MODULE_CLASS_DICT_KEY, []):
            qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
            prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
            backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
            conf.set_standalone_module_class(
                module_class, qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
        for quant_type_name, custom_module_mapping in prepare_custom_config_dict.get(FLOAT_TO_OBSERVED_DICT_KEY, {}).items():
            quant_type = _quant_type_from_str(quant_type_name)
            for float_class, observed_class in custom_module_mapping.items():
                conf.set_float_to_observed_mapping(float_class, observed_class, quant_type)
        conf.set_non_traceable_module_names(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_NAME_DICT_KEY, []))
        conf.set_non_traceable_module_classes(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_CLASS_DICT_KEY, []))
        conf.set_input_quantized_indexes(prepare_custom_config_dict.get(INPUT_QUANTIZED_INDEXES_DICT_KEY, []))
        conf.set_output_quantized_indexes(prepare_custom_config_dict.get(OUTPUT_QUANTIZED_INDEXES_DICT_KEY, []))
        conf.set_preserved_attributes(prepare_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``PrepareCustomConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig.from_dict`.
        """
        def _make_tuple(key: Any, e: StandaloneModuleConfigEntry):
            # Serialize the nested configs back to dicts where possible.
            # NOTE(review): e.backend_config is emitted as an object rather than
            # via .to_dict(), unlike qconfig_mapping/prepare_custom_config —
            # confirm whether this asymmetry is intentional.
            qconfig_dict = e.qconfig_mapping.to_dict() if e.qconfig_mapping else None
            prepare_custom_config_dict = e.prepare_custom_config.to_dict() if e.prepare_custom_config else None
            return (key, qconfig_dict, e.example_inputs, prepare_custom_config_dict, e.backend_config)

        d: Dict[str, Any] = {}
        for module_name, sm_config_entry in self.standalone_module_names.items():
            if STANDALONE_MODULE_NAME_DICT_KEY not in d:
                d[STANDALONE_MODULE_NAME_DICT_KEY] = []
            d[STANDALONE_MODULE_NAME_DICT_KEY].append(_make_tuple(module_name, sm_config_entry))
        for module_class, sm_config_entry in self.standalone_module_classes.items():
            if STANDALONE_MODULE_CLASS_DICT_KEY not in d:
                d[STANDALONE_MODULE_CLASS_DICT_KEY] = []
            d[STANDALONE_MODULE_CLASS_DICT_KEY].append(_make_tuple(module_class, sm_config_entry))
        for quant_type, float_to_observed_mapping in self.float_to_observed_mapping.items():
            if FLOAT_TO_OBSERVED_DICT_KEY not in d:
                d[FLOAT_TO_OBSERVED_DICT_KEY] = {}
            d[FLOAT_TO_OBSERVED_DICT_KEY][_get_quant_type_to_str(quant_type)] = float_to_observed_mapping
        # empty settings are omitted so the dict round-trips compactly
        if len(self.non_traceable_module_names) > 0:
            d[NON_TRACEABLE_MODULE_NAME_DICT_KEY] = self.non_traceable_module_names
        if len(self.non_traceable_module_classes) > 0:
            d[NON_TRACEABLE_MODULE_CLASS_DICT_KEY] = self.non_traceable_module_classes
        if len(self.input_quantized_indexes) > 0:
            d[INPUT_QUANTIZED_INDEXES_DICT_KEY] = self.input_quantized_indexes
        if len(self.output_quantized_indexes) > 0:
            d[OUTPUT_QUANTIZED_INDEXES_DICT_KEY] = self.output_quantized_indexes
        if len(self.preserved_attributes) > 0:
            d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
        return d
283
+
284
+
285
class ConvertCustomConfig:
    """
    Custom configuration for :func:`~torch.ao.quantization.quantize_fx.convert_fx`.

    Every setter returns ``self``, so configuration calls can be chained.

    Example usage::

        convert_custom_config = ConvertCustomConfig() \
            .set_observed_to_quantized_mapping(ObservedCustomModule, QuantizedCustomModule) \
            .set_preserved_attributes(["attr1", "attr2"])
    """

    def __init__(self):
        # Per-QuantType mapping from observed custom module classes to the
        # quantized classes that replace them during convert.
        self.observed_to_quantized_mapping: Dict[QuantType, Dict[Type, Type]] = {}
        # Attribute names kept on the graph module even when ``forward`` never uses them.
        self.preserved_attributes: List[str] = []

    def __repr__(self):
        # Show only the attributes that were actually populated.
        populated = {name: value for name, value in self.__dict__.items() if value}
        return f"ConvertCustomConfig({populated})"

    def set_observed_to_quantized_mapping(
            self,
            observed_class: Type,
            quantized_class: Type,
            quant_type: QuantType = QuantType.STATIC) -> ConvertCustomConfig:
        """
        Set the mapping from a custom observed module class to a custom quantized module class.

        The quantized module class must have a ``from_observed`` class method that converts the observed module class
        to the quantized module class.
        """
        inner = self.observed_to_quantized_mapping.setdefault(quant_type, {})
        inner[observed_class] = quantized_class
        return self

    def set_preserved_attributes(self, attributes: List[str]) -> ConvertCustomConfig:
        """
        Set the names of the attributes that will persist in the graph module even if they are not used in
        the model's ``forward`` method.
        """
        self.preserved_attributes = attributes
        return self

    # TODO: remove this
    @classmethod
    def from_dict(cls, convert_custom_config_dict: Dict[str, Any]) -> ConvertCustomConfig:
        """
        Create a ``ConvertCustomConfig`` from a dictionary with the following items:

            "observed_to_quantized_custom_module_class": a nested dictionary mapping from quantization
            mode to an inner mapping from observed module classes to quantized module classes, e.g.::
                {
                "static": {FloatCustomModule: ObservedCustomModule},
                "dynamic": {FloatCustomModule: ObservedCustomModule},
                "weight_only": {FloatCustomModule: ObservedCustomModule}
                }
            "preserved_attributes": a list of attributes that persist even if they are not used in ``forward``

        This function is primarily for backward compatibility and may be removed in the future.
        """
        conf = cls()
        mode_to_mapping = convert_custom_config_dict.get(OBSERVED_TO_QUANTIZED_DICT_KEY, {})
        for quant_type_name, custom_module_mapping in mode_to_mapping.items():
            # The outer keys are quantization-mode strings; resolve to QuantType.
            quant_type = _quant_type_from_str(quant_type_name)
            for observed_class, quantized_class in custom_module_mapping.items():
                conf.set_observed_to_quantized_mapping(observed_class, quantized_class, quant_type)
        conf.set_preserved_attributes(convert_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``ConvertCustomConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`.
        """
        d: Dict[str, Any] = {}
        for quant_type, mapping in self.observed_to_quantized_mapping.items():
            # Quantization modes are serialized as their string names.
            d.setdefault(OBSERVED_TO_QUANTIZED_DICT_KEY, {})[_get_quant_type_to_str(quant_type)] = mapping
        if self.preserved_attributes:
            d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
        return d
369
+
370
+
371
class FuseCustomConfig:
    """
    Custom configuration for :func:`~torch.ao.quantization.quantize_fx.fuse_fx`.

    Example usage::

        fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["attr1", "attr2"])
    """

    def __init__(self):
        # Attribute names kept on the graph module even when ``forward`` never uses them.
        self.preserved_attributes: List[str] = []

    def __repr__(self):
        # Show only the attributes that were actually populated.
        populated = {name: value for name, value in self.__dict__.items() if value}
        return f"FuseCustomConfig({populated})"

    def set_preserved_attributes(self, attributes: List[str]) -> FuseCustomConfig:
        """
        Set the names of the attributes that will persist in the graph module even if they are not used in
        the model's ``forward`` method.
        """
        self.preserved_attributes = attributes
        return self

    # TODO: remove this
    @classmethod
    def from_dict(cls, fuse_custom_config_dict: Dict[str, Any]) -> FuseCustomConfig:
        """
        Create a ``FuseCustomConfig`` from a dictionary with the following items:

            "preserved_attributes": a list of attributes that persist even if they are not used in ``forward``

        This function is primarily for backward compatibility and may be removed in the future.
        """
        conf = cls()
        conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
        return conf

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this ``FuseCustomConfig`` to a dictionary with the items described in
        :func:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig.from_dict`.
        """
        d: Dict[str, Any] = {}
        if self.preserved_attributes:
            d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
        return d
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/lstm_utils.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import operator
3
+ import torch
4
+ from typing import Any, Callable, Optional, Tuple
5
+ from torch.ao.quantization import (
6
+ default_weight_observer,
7
+ default_weight_fake_quant,
8
+ FakeQuantizeBase,
9
+ QConfig,
10
+ QConfigMapping,
11
+ )
12
+ from torch.ao.quantization.backend_config import BackendConfig
13
+ from torch.ao.quantization.observer import _PartialWrapper
14
+ from torch.ao.quantization.quantize_fx import (
15
+ convert_to_reference_fx,
16
+ prepare_fx,
17
+ )
18
+
19
+ # TODO: move all LSTM util functions from fx/utils.py to this file
20
+ def _get_lstm_with_individually_observed_parts(
21
+ float_lstm: torch.nn.LSTM,
22
+ example_inputs: Tuple[Any, ...],
23
+ backend_config: Optional[BackendConfig] = None,
24
+ linear_output_obs_ctr: Optional[_PartialWrapper] = None,
25
+ sigmoid_obs_ctr: Optional[_PartialWrapper] = None,
26
+ tanh_obs_ctr: Optional[_PartialWrapper] = None,
27
+ cell_state_obs_ctr: Optional[_PartialWrapper] = None,
28
+ hidden_state_obs_ctr: Optional[_PartialWrapper] = None,
29
+ ) -> torch.ao.nn.quantizable.LSTM:
30
+ """
31
+ Return an observed `torch.ao.nn.quantizable.LSTM` created from a `torch.nn.LSTM`
32
+ with specific observers or fake quantizes assigned to the inner ops or submodules.
33
+
34
+ In both eager and FX graph mode quantization, `torch.ao.nn.quantizable.LSTM` is
35
+ used as an observed custom module, which is responsible for inserting its own
36
+ observers. By default, all inner ops inherit the parent custom module's QConfig.
37
+ Users who wish to override this behavior may extend `torch.ao.nn.quantizable.LSTM`
38
+ and use this helper function to customize the observer insertion logic.
39
+
40
+ This is meant to be used to convert a float module to an observed module in the
41
+ custom module flow.
42
+
43
+ Args:
44
+ `float_lstm`: The float LSTM module
45
+ `example_inputs`: example inputs for the forward function of the LSTM module
46
+ `backend_config`: BackendConfig to use to observe the LSTM module
47
+ `linear_output_obs_ctr`: observer or fake quantize for linear outputs Wx + b,
48
+ where W is the weight matrix, b is the bias, and x is either the inputs
49
+ or the hidden state from the previous layer (if any)
50
+ `sigmoid_obs_ctr`: observer or fake quantize for sigmoid activations
51
+ `tanh_obs_ctr`: observer or fake quantize for tanh activations
52
+ `cell_state_obs_ctr`: observer or fake quantize for the cell state
53
+ `hidden_state_obs_ctr`: observer or fake quantize for the hidden state and
54
+ the output
55
+
56
+ Return:
57
+ A `torch.ao.nn.quantizable.LSTM` with the specified observers or fake quantizes
58
+ assigned to the inner ops.
59
+ """
60
+ def make_qconfig(obs_ctr: _PartialWrapper) -> QConfig:
61
+ """
62
+ Make a QConfig with fixed qparams observers or fake quantizes.
63
+ """
64
+ if isinstance(obs_ctr(), FakeQuantizeBase):
65
+ weight = default_weight_fake_quant
66
+ else:
67
+ weight = default_weight_observer
68
+ return QConfig(activation=obs_ctr, weight=weight)
69
+
70
+ quantizable_lstm = torch.ao.nn.quantizable.LSTM(
71
+ float_lstm.input_size, float_lstm.hidden_size, float_lstm.num_layers, float_lstm.bias,
72
+ float_lstm.batch_first, float_lstm.dropout, float_lstm.bidirectional)
73
+ quantizable_lstm.qconfig = float_lstm.qconfig
74
+
75
+ for idx in range(float_lstm.num_layers):
76
+ quantizable_lstm.layers[idx] = torch.ao.nn.quantizable.modules.rnn._LSTMLayer.from_float(float_lstm,
77
+ idx,
78
+ float_lstm.qconfig,
79
+ batch_first=False)
80
+
81
+ # Build QConfigMapping for the LSTM cell
82
+ # Note: FloatFunctional qconfigs will be configured separately below
83
+ cell_qm = QConfigMapping().set_global(float_lstm.qconfig) # type: ignore[arg-type]
84
+ if sigmoid_obs_ctr is not None:
85
+ cell_qm.set_module_name("input_gate", make_qconfig(sigmoid_obs_ctr))
86
+ cell_qm.set_module_name("forget_gate", make_qconfig(sigmoid_obs_ctr))
87
+ cell_qm.set_module_name("output_gate", make_qconfig(sigmoid_obs_ctr))
88
+ if tanh_obs_ctr is not None:
89
+ cell_qm.set_module_name("cell_gate", make_qconfig(tanh_obs_ctr))
90
+
91
+ # Insert observers into each LSTM cell
92
+ # TODO: maybe make this work for layer_bw as well
93
+ for layer in quantizable_lstm.layers:
94
+ cell = layer.layer_fw.cell
95
+ cell = prepare_fx(cell, cell_qm, example_inputs, backend_config=backend_config)
96
+ # HACK: Manually replace the activation_post_process following these ops.
97
+ # This is needed for FloatFunctional ops because there is currently no way
98
+ # to configure these ops in FX graph mode quantization today. This is because
99
+ # the FloatFunctional modules simply disappear from the graph after tracing.
100
+ # In the future, we should rewrite quantizable LSTM without FloatFunctionals.
101
+ op_index_to_activation_post_process_ctr = {
102
+ (torch.add, 0): linear_output_obs_ctr, # gates.add
103
+ (torch.mul, 0): cell_state_obs_ctr, # fgate_cx.mul
104
+ (torch.mul, 1): cell_state_obs_ctr, # igate_cgate.mul
105
+ (torch.add, 1): cell_state_obs_ctr, # fgate_cx_igate_cgate.add
106
+ (torch.mul, 2): hidden_state_obs_ctr, # ogate_cy.mul
107
+ }
108
+ add_count = 0
109
+ mul_count = 0
110
+ for node in cell.graph.nodes:
111
+ op_index: Optional[Tuple[Callable, int]] = None # e.g. (torch.add, 1)
112
+ if node.target == torch.add:
113
+ op_index = (torch.add, add_count)
114
+ add_count += 1
115
+ elif node.target == torch.mul:
116
+ op_index = (torch.mul, mul_count)
117
+ mul_count += 1
118
+ else:
119
+ # Neither torch.add nor torch.mul
120
+ continue
121
+ if op_index not in op_index_to_activation_post_process_ctr:
122
+ continue
123
+ assert len(node.users) == 1
124
+ activation_post_process_name = next(iter(node.users.keys())).name
125
+ activation_post_process_ctr = op_index_to_activation_post_process_ctr[op_index]
126
+ if activation_post_process_ctr is not None:
127
+ setattr(cell, activation_post_process_name, activation_post_process_ctr())
128
+ layer.layer_fw.cell = cell
129
+ return quantizable_lstm
130
+
131
def _get_reference_quantized_lstm_module(
    observed_lstm: torch.ao.nn.quantizable.LSTM,
    backend_config: Optional[BackendConfig] = None,
) -> torch.ao.nn.quantized.LSTM:
    """
    Return a `torch.ao.nn.quantized.LSTM` created from a `torch.ao.nn.quantizable.LSTM`
    with observers or fake quantizes inserted through `prepare_fx`, e.g. from
    `_get_lstm_with_individually_observed_parts`.

    This is meant to be used to convert an observed module to a quantized module in the
    custom module flow.

    Args:
        `observed_lstm`: a `torch.ao.nn.quantizable.LSTM` observed through `prepare_fx`
        `backend_config`: BackendConfig to use to produce the reference quantized model

    Return:
        A reference `torch.ao.nn.quantized.LSTM` module.
    """
    # Mirror the observed module's constructor arguments onto the quantized wrapper.
    quantized_lstm = torch.ao.nn.quantized.LSTM(
        observed_lstm.input_size, observed_lstm.hidden_size, observed_lstm.num_layers,
        observed_lstm.bias, observed_lstm.batch_first, observed_lstm.dropout,
        observed_lstm.bidirectional)

    for i, layer in enumerate(quantized_lstm.layers):
        # Deep copy so the graph surgery below does not mutate the observed module.
        cell = copy.deepcopy(observed_lstm.layers.get_submodule(str(i)).layer_fw.cell)  # type: ignore[union-attr]
        cell = convert_to_reference_fx(cell, backend_config=backend_config)  # type: ignore[arg-type]
        assert isinstance(cell, torch.fx.GraphModule)
        # HACK: Manually remove input quantize nodes and output dequantize nodes,
        # since custom modules expect quint8 inputs and outputs for now. Note that
        # this functionality is supposedly handled through PrepareCustomConfig's
        # `set_input_quantized_indexes` and `set_output_quantized_indexes`, but that
        # API doesn't currently handle tuple inputs and outputs, so we have to do
        # this manually for now. In the future we should (1) relax the restriction
        # on custom module input/output dtypes, and (2) expand support for complex
        # input/output structures.
        for node in cell.graph.nodes:
            if node.target == torch.quantize_per_tensor:
                arg = node.args[0]
                # Remove quantize(x), quantize(hidden[0]), and quantize(hidden[1])
                if arg.target == "x" or (arg.target == operator.getitem and arg.args[0].target == "hidden"):
                    with cell.graph.inserting_before(node):
                        node.replace_all_uses_with(arg)
                        cell.graph.erase_node(node)
            if node.target == "output":
                # Remove all dequantize nodes in the output tuple
                for arg in node.args[0]:
                    with cell.graph.inserting_before(node):
                        node.replace_input_with(arg, arg.args[0])
        cell.graph.eliminate_dead_code()
        cell.recompile()
        layer.layer_fw.cell = cell
    return quantized_lstm
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/quantize_handler.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from abc import ABC
3
+ from typing import Callable, Dict, List, Optional, Type
4
+
5
+ import torch
6
+
7
+ from torch.ao.quantization.backend_config import (
8
+ BackendConfig,
9
+ DTypeConfig,
10
+ ObservationType,
11
+ )
12
+ from torch.ao.quantization.utils import NodePattern, Pattern, QuantizerCls
13
+ from torch.fx.graph import Node
14
+
15
+ from .utils import all_node_args_have_no_tensors
16
+
17
+
18
+ __all__ = [
19
+ "QuantizeHandler",
20
+ "BinaryOpQuantizeHandler",
21
+ "CatQuantizeHandler",
22
+ "ConvReluQuantizeHandler",
23
+ "LinearReLUQuantizeHandler",
24
+ "BatchNormQuantizeHandler",
25
+ "EmbeddingQuantizeHandler",
26
+ "RNNDynamicQuantizeHandler",
27
+ "DefaultNodeQuantizeHandler",
28
+ "FixedQParamsOpQuantizeHandler",
29
+ "CopyNodeQuantizeHandler",
30
+ "GeneralTensorShapeOpQuantizeHandler",
31
+ "CustomModuleQuantizeHandler",
32
+ "StandaloneModuleQuantizeHandler",
33
+ ]
34
+
35
+ def _default_root_node_getter(node_pattern):
36
+ if node_pattern is None:
37
+ return node_pattern
38
+ while not isinstance(node_pattern, Node):
39
+ node_pattern = node_pattern[-1]
40
+ return node_pattern
41
+
42
# Base Pattern Handler
class QuantizeHandler(ABC):  # noqa: B024
    """Base handler class for the quantizer patterns.

    Instances record information about a matched pattern at prepare time,
    which is later consulted during convert.
    """
    def __init__(
            self,
            node_pattern: NodePattern,
            modules: Dict[str, torch.nn.Module],
            root_node_getter: Optional[Callable] = None,
            is_custom_module=False,
            is_standalone_module=False):
        """Record pattern information in __init__, which will be used in convert."""
        self.node_pattern = node_pattern
        self.modules = modules
        getter = _default_root_node_getter if root_node_getter is None else root_node_getter
        self.root_node = getter(node_pattern)
        self.is_custom_module_ = is_custom_module
        self.is_standalone_module_ = is_standalone_module
        # Count how many of the root node's args are Tensors (versus scalars);
        # this distinguishes things like "x + y" from "x + 2" or "2 + x".
        self.num_tensor_args = 0
        if isinstance(self.root_node, Node):
            no_tensor_cache: Dict[Node, bool] = {}
            for arg in self.root_node.args:
                if isinstance(arg, Node) and not all_node_args_have_no_tensors(
                        arg, self.modules, no_tensor_cache):
                    self.num_tensor_args += 1

    def is_general_tensor_value_op(self) -> bool:
        """
        Returns True if the operator works for both floating point and
        quantized input, and does some computation based on the input Tensor,
        or only re-arranges the Tensor values or queries some metadata about
        the Tensor. For such ops we insert an observer/fake_quant for the
        output that is the SAME instance as the input's, since the value
        distribution differs between input and output while they share the
        same quantization parameters.
        Example operator: avgpool2d, reshape, transpose, maxpool2d
        Example observed operator:
            observer_0 - avgpool2d - observer_0 (same observer instance as input)
        """
        return False

    def is_custom_module(self):
        """Whether this pattern was matched as a custom module."""
        return self.is_custom_module_

    def is_standalone_module(self):
        """Whether this pattern was matched as a standalone module."""
        return self.is_standalone_module_
97
+
98
+ def _get_quantize_handler_cls(
99
+ observation_type: ObservationType,
100
+ dtype_configs: List[DTypeConfig],
101
+ num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> Type[QuantizeHandler]:
102
+ """
103
+ Return a configurable QuantizeHandler that matches the given specifications from the backend.
104
+ """
105
+
106
+ class ConfigurableQuantizeHandler(QuantizeHandler):
107
+ def __init__(
108
+ self,
109
+ node_pattern: NodePattern,
110
+ modules: Dict[str, torch.nn.Module],
111
+ root_node_getter: Optional[Callable] = None):
112
+ super().__init__(node_pattern, modules, root_node_getter)
113
+ if num_tensor_args_to_observation_type:
114
+ assert self.num_tensor_args in num_tensor_args_to_observation_type, \
115
+ f"Must provide observation_type config for tensor number {self.num_tensor_args}" \
116
+ f" in num_tensor_args_to_observation_type for {node_pattern}"
117
+ self.observation_type = num_tensor_args_to_observation_type[self.num_tensor_args]
118
+ else:
119
+ self.observation_type = observation_type
120
+ self.dtype_configs = dtype_configs
121
+
122
+ def is_general_tensor_value_op(self) -> bool:
123
+ return self.observation_type == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
124
+
125
+ return ConfigurableQuantizeHandler
126
+
127
+ def _get_pattern_to_quantize_handlers(backend_config: BackendConfig) -> Dict[Pattern, QuantizerCls]:
128
+ """
129
+ Note: Quantize handler is just a holder for some check methods like
130
+ (should_insert_observer_for_output), maybe this can be a enum as well,
131
+ we can refactor this after we convert the path for fbgemm/qnnpack fully to the
132
+ new path, this is not exposed to backend developers
133
+ """
134
+ pattern_to_quantize_handlers = {}
135
+ for pattern, config in backend_config._pattern_complex_format_to_config.items():
136
+ observation_type = config.observation_type
137
+ dtype_configs = config.dtype_configs
138
+ num_tensor_args_to_observation_type = config._num_tensor_args_to_observation_type
139
+ pattern_to_quantize_handlers[pattern] = \
140
+ _get_quantize_handler_cls(
141
+ observation_type,
142
+ dtype_configs,
143
+ num_tensor_args_to_observation_type)
144
+ return pattern_to_quantize_handlers
145
+
146
# ------------------------------------------------------------------------------
# Legacy QuantizeHandler subclasses, kept only for backward compatibility with
# the public `torch.ao.quantization` namespace. None of them adds behavior over
# the base class; new code should not use them.
# ------------------------------------------------------------------------------

# TODO: remove this class, this is still exposed in torch.ao.quantization
# but we should be able to break bc
class BinaryOpQuantizeHandler(QuantizeHandler):
    pass

class CatQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class ConvReluQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class LinearReLUQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class BatchNormQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class EmbeddingQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class RNNDynamicQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove this class
class DefaultNodeQuantizeHandler(QuantizeHandler):
    """ Common quantized op, first input and first output will be quantized
    """
    pass

# TODO: remove this class
class FixedQParamsOpQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove
class CopyNodeQuantizeHandler(QuantizeHandler):
    pass

# TODO: remove
class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
    pass

# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated
class CustomModuleQuantizeHandler(QuantizeHandler):
    pass

# TODO: not used, can be removed after torch.ao.quantization namespace is deprecated
class StandaloneModuleQuantizeHandler(QuantizeHandler):
    pass
parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/tracer.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.fx._symbolic_trace import Tracer
3
+ from torch.fx.proxy import Scope
4
+ from torch.ao.nn.intrinsic import _FusedModule
5
+ from typing import List, Callable
6
+
7
+ __all__ = [
8
+ "QuantizationTracer",
9
+ ]
10
+
11
class ScopeContextManager(torch.fx.proxy.ScopeContextManager):
    """Context manager that temporarily switches the tracer's current `scope`
    to a Scope built from a module instance and its qualified path.

    Thin convenience wrapper: the base class takes two Scope objects, this one
    constructs the target Scope from `(current_module_path, type(current_module))`.
    """
    def __init__(
        self,
        scope: Scope,  # the tracer's live Scope object to be swapped in place
        current_module: torch.nn.Module,
        current_module_path: str
    ):
        super().__init__(scope, Scope(current_module_path, type(current_module)))
19
+
20
+
21
class QuantizationTracer(Tracer):
    """FX tracer used by the quantization workflow.

    Besides the default Tracer behavior, it treats all torch.nn / torch.ao.nn
    modules (except Sequential), fused modules, and explicitly skipped modules
    as leaves so they are not traced into.
    """

    def __init__(
        self, skipped_module_names: List[str], skipped_module_classes: List[Callable]
    ):
        super().__init__()
        self.skipped_module_names = skipped_module_names
        self.skipped_module_classes = skipped_module_classes
        # NB: initialized the module_type of top level module to None
        # we are assuming people won't configure the model with the type of top level
        # module here, since people can use "" for global config
        # We can change this if there is a use case that configures
        # qconfig using top level module type
        self.scope = Scope("", None)
        self.record_stack_traces = True

    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
        """Return True if `m` should be kept as a single call_module node."""
        if isinstance(m, _FusedModule):
            return True
        if module_qualified_name in self.skipped_module_names:
            return True
        if type(m) in self.skipped_module_classes:
            return True
        # Builtin torch modules are leaves, except containers like Sequential.
        is_torch_nn = m.__module__.startswith(("torch.nn", "torch.ao.nn"))
        return is_torch_nn and not isinstance(m, torch.nn.Sequential)
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/qat_utils.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/duplicate_dq_pass.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ import operator
4
+
5
+ import torch
6
+
7
+ from torch.ao.quantization.pt2e.utils import (
8
+ _filter_sym_size_users,
9
+ _is_valid_annotation,
10
+ )
11
+
12
+ from torch.fx.node import map_arg
13
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
14
+
15
+
16
+ logger = logging.getLogger(__name__)
17
+ logger.setLevel(logging.WARNING)
18
+
19
+ __all__ = ["DuplicateDQPass"]
20
+
21
+ _QUANTIZE_OPS = [
22
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
23
+ torch.ops.quantized_decomposed.quantize_per_tensor.tensor,
24
+ torch.ops.quantized_decomposed.quantize_per_channel.default,
25
+ ]
26
+
27
+ _DEQUANTIZE_OPS = [
28
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
29
+ torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
30
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
31
+ ]
32
+
33
+
34
def _maybe_duplicate_dq(
    gm: torch.fx.GraphModule, dq_node: torch.fx.Node, user: torch.fx.Node
):
    """Give `user` its own private copy of `dq_node`.

    Only applies when `user` carries a valid quantization annotation; otherwise
    the shared dequantize node is left in place. The copy is inserted right
    after `dq_node` and `user`'s args/kwargs are rewired to point at it.
    """
    annotation = user.meta.get("quantization_annotation", None)
    if not _is_valid_annotation(annotation):
        return
    with gm.graph.inserting_after(dq_node):
        new_node = gm.graph.node_copy(dq_node)

        # Replace references to the shared dq node with the fresh copy,
        # leaving all other args untouched.
        def maybe_replace_node(n: torch.fx.Node) -> torch.fx.Node:
            if n == dq_node:
                return new_node
            else:
                return n

        new_args = map_arg(user.args, maybe_replace_node)
        new_kwargs = map_arg(user.kwargs, maybe_replace_node)
        user.args = new_args
        user.kwargs = new_kwargs
53
+
54
+
55
class DuplicateDQPass(PassBase):
    """Graph pass that duplicates dequantize nodes with multiple users so that
    each annotated consumer gets its own dq node (simplifying later pattern
    matching/lowering). Dequantize nodes belonging to a dynamic-quantization
    chain (choose_qparams -> getitem -> q -> dq) are deliberately left shared.
    """

    def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
        for node in graph_module.graph.nodes:
            if node.op == "call_function" and node.target in _DEQUANTIZE_OPS:
                # Ignore sym_size users; only real consumers count.
                dq_users = _filter_sym_size_users(node)
                if len(dq_users) <= 1:
                    continue
                # Do not duplicate dq for dynamic quantization
                # Pattern: choose_qparam - getitem - q - dq
                # NOTE(review): assumes node.args[0] is the producing quantize
                # node (a Node) — holds for graphs emitted by this workflow.
                q_node = node.args[0]
                if q_node.op == "call_function" and q_node.target in _QUANTIZE_OPS:
                    # args[1] of a dynamic quantize op is the scale, produced by
                    # getitem(choose_qparams(...), idx).
                    getitem_node = q_node.args[1]
                    if (
                        isinstance(getitem_node, torch.fx.node.Node)
                        and getitem_node.op == "call_function"
                        and getitem_node.target == operator.getitem
                    ):
                        choose_qparam_node = getitem_node.args[0]
                        if (
                            isinstance(choose_qparam_node, torch.fx.node.Node)
                            and choose_qparam_node.op == "call_function"
                            and choose_qparam_node.target
                            == torch.ops.quantized_decomposed.choose_qparams.tensor
                        ):
                            continue
                for user in dq_users:
                    _maybe_duplicate_dq(graph_module, node, user)
        # Original (now multiply-replaced) dq nodes become dead; clean them up.
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()
        return PassResult(graph_module, True)
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/generate_numeric_debug_handle.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.fx import GraphModule, Node
2
+
3
+ __all__ = ["generate_numeric_debug_handle"]
4
+
5
+
6
def generate_numeric_debug_handle(graph_module: GraphModule) -> None:
    """Attach a "numeric_debug_handle" dict to every call_function node.

    Each Node argument of the call gets a fresh sequential integer id, and the
    node's own output gets the next id under the key "output". Ids are unique
    across the whole graph.
    """
    next_id = 0
    for node in graph_module.graph.nodes:
        if node.op != "call_function":
            continue
        handles = {}
        node.meta["numeric_debug_handle"] = handles
        for arg in node.args:
            if isinstance(arg, Node):
                handles[arg] = next_id
                next_id += 1
        handles["output"] = next_id
        next_id += 1
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/port_metadata_pass.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ from typing import Optional
4
+
5
+ import torch
6
+ from torch._export.error import InternalError
7
+
8
+ from torch.ao.quantization.pt2e.utils import (
9
+ _filter_sym_size_users,
10
+ _find_q_dq_node_for_user,
11
+ _is_valid_annotation,
12
+ )
13
+
14
+ from torch.ao.quantization.quantizer import QuantizationSpecBase
15
+
16
+ from torch.fx.passes.infra.pass_base import PassBase, PassResult
17
+
18
+
19
+ logger = logging.getLogger(__name__)
20
+ logger.setLevel(logging.ERROR)
21
+
22
+ __all__ = ["PortNodeMetaForQDQ"]
23
+
24
+ _METADATA_TO_PORT = [
25
+ "stack_trace",
26
+ "quantization_tag",
27
+ ]
28
+
29
+ _QUANTIZE_OPS = [
30
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
31
+ torch.ops.quantized_decomposed.quantize_per_tensor.tensor,
32
+ torch.ops.quantized_decomposed.quantize_per_channel.default,
33
+ ]
34
+
35
+ _DEQUANTIZE_OPS = [
36
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
37
+ torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
38
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
39
+ ]
40
+
41
+
42
+ def _add_metadata(to_node: torch.fx.Node, from_node: torch.fx.Node) -> None:
43
+ from_meta = from_node.meta
44
+ for meta_name in _METADATA_TO_PORT:
45
+ if meta_name in from_meta:
46
+ to_node.meta[meta_name] = from_meta[meta_name]
47
+
48
+
49
+ def _has_quant_annotation(node: torch.fx.Node) -> bool:
50
+ return "quantization_annotation" in node.meta
51
+
52
+
53
+ def _find_choose_qparams_node(node: torch.fx.Node) -> Optional[torch.fx.Node]:
54
+ # BFS to look for choose qparams
55
+ from collections import deque
56
+
57
+ queue = deque(list(node.users.keys()))
58
+ while len(queue):
59
+ n = queue.popleft()
60
+ if n.op == "output":
61
+ continue
62
+ if (
63
+ n.op == "call_function"
64
+ and n.target == torch.ops.quantized_decomposed.choose_qparams.tensor
65
+ ):
66
+ return n
67
+ for k in n.users.keys():
68
+ queue.append(k)
69
+ return None
70
+
71
+
72
+ def _port_metadata_for_input_quant_nodes(
73
+ input_node: torch.fx.Node,
74
+ node: torch.fx.Node,
75
+ qspec: Optional[QuantizationSpecBase],
76
+ ):
77
+ if qspec is None:
78
+ return
79
+
80
+ is_dynamic_quant = getattr(qspec, "is_dynamic", None)
81
+ if is_dynamic_quant is not None and is_dynamic_quant is True:
82
+ choose_qparams_node = _find_choose_qparams_node(input_node)
83
+ if choose_qparams_node is None:
84
+ raise ValueError(f"No chose qparams node found for {node}")
85
+ choose_qparam_users = _filter_sym_size_users(choose_qparams_node)
86
+ if len(choose_qparam_users) != 2:
87
+ raise InternalError(f"Expecting exactly two user for {choose_qparams_node}")
88
+ scale_node = choose_qparam_users.pop()
89
+ dynamic_q_node = next(iter(scale_node.users.keys()))
90
+ dynamic_q_node_users = _filter_sym_size_users(dynamic_q_node)
91
+ if len(dynamic_q_node_users) > 1:
92
+ raise InternalError(f"Expecting single user for {dynamic_q_node}")
93
+ dynamic_dq_node = dynamic_q_node_users.pop()
94
+ _add_metadata(choose_qparams_node, node)
95
+ _add_metadata(dynamic_q_node, node)
96
+ _add_metadata(dynamic_dq_node, node)
97
+ else:
98
+ q_node, dq_node = _find_q_dq_node_for_user(input_node, node)
99
+ if q_node is None or dq_node is None:
100
+ return
101
+ # add metadata for all the node between q_node and get_attr node
102
+ # if the q_node can be traced back to get_attr node
103
+ q_to_get_attr_nodes = [q_node]
104
+ q_node_input = q_node.args[0]
105
+ while (
106
+ isinstance(q_node_input, torch.fx.Node)
107
+ and q_node_input.op == "call_function"
108
+ and q_node_input.target
109
+ in [
110
+ torch.ops.aten.flatten.using_ints,
111
+ torch.ops.aten.permute.default,
112
+ torch.ops.aten.permute_copy.default,
113
+ torch.ops.aten.slice_copy.Tensor,
114
+ torch.ops.aten.squeeze.dim,
115
+ torch.ops.aten.squeeze_copy.dim,
116
+ torch.ops.aten.transpose.Dimname,
117
+ torch.ops.aten.transpose.int,
118
+ torch.ops.aten.transpose_,
119
+ torch.ops.aten.view_copy.default,
120
+ torch.ops.aten.view.default,
121
+ torch.ops.aten._mkldnn_transpose,
122
+ ]
123
+ ):
124
+ q_to_get_attr_nodes.append(q_node_input)
125
+ q_node_input = q_node_input.args[0]
126
+ if isinstance(q_node_input, torch.fx.Node) and q_node_input.op == "get_attr":
127
+ for n in q_to_get_attr_nodes:
128
+ _add_metadata(n, q_node_input)
129
+ _add_metadata(dq_node, node)
130
+
131
+
132
+ def _port_metadata_for_output_quant_nodes(
133
+ node: torch.fx.Node, qspec: Optional[QuantizationSpecBase]
134
+ ):
135
+ if qspec is None:
136
+ return
137
+
138
+ node_users = _filter_sym_size_users(node)
139
+ if len(node_users) != 1:
140
+ logger.warning(f"Expecting {node} to have single user") # noqa: G004
141
+ q_node = node_users.pop()
142
+ if q_node.op != "call_function" or q_node.target not in _QUANTIZE_OPS:
143
+ logger.warning(
144
+ f"Expecting {node} user to be a quantized op but got {q_node}" # noqa: G004
145
+ ) # noqa: G004
146
+ return
147
+
148
+ _add_metadata(q_node, node)
149
+
150
+
151
+ class PortNodeMetaForQDQ(PassBase):
152
+ """
153
+ Port metadata for nodes added by quantization flow.
154
+ For static quant these are:
155
+ - quantizer_per_tensor.default, dequantize_per_tensor.default
156
+ - quantizer_per_channel.default, dequantize_per_channel.default
157
+ For dynamic quant these are:
158
+ - choose_qparams.tensor
159
+ - quantizer_per_tensor.tensor, dequantize_per_tensor.tensor
160
+ - quantizer_per_channel.default, dequantize_per_channel.default
161
+
162
+ Rules of porting metadata:
163
+ - Metadata to be ported:
164
+ - nn_module_stack
165
+ - stack_trace
166
+ - quantization_tag
167
+ - Metadata to NOT be ported:
168
+ - Everything else
169
+ - Rules:
170
+ - Statically quantized patterns:
171
+ - Dequantize nodes on the inputs to be quantized inherit metadata of the consumer node.
172
+ - Quantize nodes on the outputs inherit metadata of the producer node.
173
+ - Example 1:
174
+ - Original: [Conv -> AvgPool -> Linear]
175
+ - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ]
176
+ - Inner brackets specify which nodes Q/DQ inherit metdata from
177
+ - [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> [DQ -> Linear -> Q] -> DQ]
178
+ - Note first Q and last DQ do not inherit metadata from any nodes
179
+ - Example 2:
180
+ - Original: [Conv -> AvgPool -> Linear]
181
+ - AvgPool is not quantized
182
+ - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ]
183
+ - Inner brackets specify which nodes Q/DQ inherit metdata from
184
+ - [Q-> [DQ -> Conv -> Q] -> DQ -> [AvgPool] -> Q -> [DQ -> Linear -> Q] -> DQ]
185
+ - Note DQ and Q nodes around AvgPool do not inherit metadata from AvgPool because
186
+ AvgPool was not supposed to be quantized. Metadata porting relies on quantization_annotation
187
+ on the nodes (in this case AvgPool node) to conclude if the node or patter was
188
+ supposed to be quantized. And subsequntly decide if the preceding Q, if any, should
189
+ inherit metadata from AvgPool.
190
+ - Dynamically quantized patterns:
191
+ - Input that are dynamically quantized have choose_qparams, quantize and dequantize nodes
192
+ - For example, below linear is dynamically quantized while rest statically:
193
+ - Original: [Conv -> AvgPool -> Linear]
194
+ - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> choose_params -> Q -> DQ -> Linear]
195
+ - Quantized [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> DQ -> [choose_params -> Q -> DQ -> Linear]]
196
+ - Note first Q does not inherit metadata from any nodes
197
+ NB:
198
+ - The best place for porting metadata is during observer conversion to q/dq. This is because it precisely
199
+ knows which quantization spec is converted to q/dq and thus from where the metadata should be ported.
200
+ However, since FX and PT2E quant workflow are on a common code-base, this hurts readability quite a bit.
201
+ Doing it via a separate pass, helps readability of the code. Once we are able to refactor PT2E quant
202
+ code, this pass should like to be integrated in the refactored variant of "convert" step.
203
+ """
204
+
205
+ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
206
+ for node in graph_module.graph.nodes:
207
+ annotation = node.meta.get("quantization_annotation", None)
208
+ if _is_valid_annotation(annotation):
209
+ input_qspec_map = node.meta["quantization_annotation"].input_qspec_map
210
+ output_qspec = node.meta["quantization_annotation"].output_qspec
211
+ for input_node, qspec in input_qspec_map.items():
212
+ _port_metadata_for_input_quant_nodes(input_node, node, qspec)
213
+ _port_metadata_for_output_quant_nodes(node, output_qspec)
214
+ return PassResult(graph_module, True)
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/prepare.py ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch._subclasses import FakeTensor
4
+ from torch.ao.quantization.fx.prepare import (
5
+ _insert_obs_or_fq,
6
+ _save_state,
7
+ _is_activation_post_process_node,
8
+ _create_obs_or_fq_from_qspec,
9
+ )
10
+ from torch.fx import (
11
+ GraphModule,
12
+ Graph,
13
+ Node,
14
+ )
15
+ from torch.fx.node import Argument
16
+
17
+ from torch.ao.quantization import QConfigMapping
18
+ from torch.ao.quantization.qconfig import QConfigAny
19
+ from torch.ao.quantization.fx.custom_config import PrepareCustomConfig
20
+ from typing import Dict, Tuple, Union, Any, Optional
21
+ from torch.ao.quantization.quantizer import (
22
+ EdgeOrNode,
23
+ SharedQuantizationSpec,
24
+ QuantizationSpecBase,
25
+ )
26
+ from torch.ao.quantization import ObserverOrFakeQuantize
27
+
28
+ # TODO: make pt2e folder private?
29
+ __all__ = [
30
+ "prepare",
31
+ ]
32
+
33
+
34
+ def _find_root_edge_or_node(edge_or_node: EdgeOrNode, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]) -> EdgeOrNode:
35
+ """Find the root node for the sharing tree
36
+ Args:
37
+ edge_or_node: edge/node that we want to find the root
38
+ shared_with_map: each edge/node points to the parent, the root node will points to itself
39
+
40
+ Returns:
41
+ root edge/node
42
+ """
43
+ parent = shared_with_map[edge_or_node]
44
+ if parent == edge_or_node:
45
+ return edge_or_node
46
+ root = _find_root_edge_or_node(parent, shared_with_map)
47
+ # path compression
48
+ shared_with_map[edge_or_node] = root
49
+ return root
50
+
51
+ def _union(parent: EdgeOrNode, child: EdgeOrNode, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]) -> None:
52
+ """Merge the subtree for `child` with `parent`, the order is important here
53
+ """
54
+ root_parent = _find_root_edge_or_node(parent, shared_with_map)
55
+ root_child = _find_root_edge_or_node(child, shared_with_map)
56
+ # union the two trees by pointing the root of child to root of parent
57
+ shared_with_map[root_child] = root_parent
58
+
59
def _update_shared_with(child: EdgeOrNode, qspec: QuantizationSpecBase, shared_with_map: Dict[EdgeOrNode, EdgeOrNode]):
    """Apply a ``SharedQuantizationSpec`` to ``shared_with_map``.

    Records the sharing relationship between ``child`` and the edge/node that
    its qspec points to; non-shared qspecs leave the map untouched. The map is
    used later to derive observer group ids.
    """
    if not isinstance(qspec, SharedQuantizationSpec):
        return
    # qspec for `a` = SharedQuantizationSpec(b) means `a` points to `b`,
    # so `b` becomes the parent in the union-find structure
    _union(qspec.edge_or_node, child, shared_with_map)
70
def _unwrap_shared_qspec(
    qspec: QuantizationSpecBase,
    edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase],
    shared_with_map: Dict[EdgeOrNode, EdgeOrNode]
) -> QuantizationSpecBase:
    """Resolve a (possibly shared) qspec down to its concrete root qspec.

    While the qspec is a ``SharedQuantizationSpec``:
    (1) find the root edge/node for the edge/node the qspec points to
    (2) continue with the qspec annotated on that root
    The first non-shared qspec encountered is returned.
    """
    current = qspec
    while isinstance(current, SharedQuantizationSpec):
        root = _find_root_edge_or_node(current.edge_or_node, shared_with_map)
        current = edge_or_node_to_qspec[root]
    return current
87
+ def _has_same_dtype(qspec_a: QuantizationSpecBase, qspec_b: QuantizationSpecBase):
88
+ return (
89
+ hasattr(qspec_a, "dtype") and
90
+ hasattr(qspec_b, "dtype") and
91
+ qspec_a.dtype == qspec_b.dtype
92
+ )
93
+
94
+ def _has_same_is_dynamic(qspec_a: QuantizationSpecBase, qspec_b: QuantizationSpecBase):
95
+ return (
96
+ hasattr(qspec_a, "is_dynamic") and
97
+ hasattr(qspec_b, "is_dynamic") and
98
+ qspec_a.is_dynamic == qspec_b.is_dynamic
99
+ )
100
+
101
+ def _get_edge_or_node_to_qspec(model: torch.fx.GraphModule) -> Dict[EdgeOrNode, QuantizationSpecBase]:
102
+ """Get a map from EdgeOrNode to quantization spec based on annotations on the nodes
103
+ """
104
+ edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase] = {}
105
+ for n in model.graph.nodes:
106
+ if hasattr(n, "meta") and "quantization_annotation" in n.meta:
107
+ qa = n.meta["quantization_annotation"]
108
+ for input_to_n, qspec in qa.input_qspec_map.items():
109
+ input_edge = (input_to_n, n)
110
+ edge_or_node_to_qspec[input_edge] = qspec
111
+ if qa.output_qspec is not None:
112
+ output_node = n
113
+ qspec = qa.output_qspec
114
+ edge_or_node_to_qspec[output_node] = qspec
115
+ return edge_or_node_to_qspec
116
+
117
def _union_input_edge_with(
    input_edge: EdgeOrNode,
    input_edge_root_qspec: QuantizationSpecBase,
    edge_or_node: EdgeOrNode,
    edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase],
    shared_with_map: Dict[EdgeOrNode, EdgeOrNode]
) -> None:
    """Union input edge with another edge or node, used in implicit sharing to point the current input
    edge to other user edges of the producer node, or the output of producer node since these are
    referring to the same Tensor.

    The union only happens when both sides resolve to compatible root qspecs
    (same dtype and same is_dynamic); otherwise the map is left unchanged.
    """
    # resolve the other side's root qspec (if it is annotated at all)
    root_qspec = None
    if edge_or_node in edge_or_node_to_qspec:
        qspec = edge_or_node_to_qspec[edge_or_node]
        root_qspec = _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map)
    # TODO: add assertions for types of root qspecs
    if (
        root_qspec is not None and
        _has_same_dtype(root_qspec, input_edge_root_qspec) and
        _has_same_is_dynamic(root_qspec, input_edge_root_qspec)
    ):
        # the input arg to the node should reuse the existing output observer for arg
        # since dtype is the same (we may want to extend this to be a more strict check
        # in the future)
        # so we point from `input_edge` to `arg` (output of the argument)
        _union(edge_or_node, input_edge, shared_with_map)
139
def _get_edge_or_node_to_group_id(edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase]) -> Dict[EdgeOrNode, int]:
    """Map from edge/node to the group ID, generated from quantization annotations,
    edge/node with the same group ID should use the same observer/fake_quant instance

    This is applying SharedQuantizationSpec configuration and map each edge/node to a group
    There is another implicit sharing that's built in the quantization, when we have the following:
       * op1 -> op2
       * output of op1: int8_qspec
       * (op1 -> op2) input edge: int8_qspec
    we'll assume sharing between the output of op1 and input of (op1 -> op2) since these are the same Tensor.

    Figuring out the correct group ID for all edge/node is a standard union find problem:
    https://www.geeksforgeeks.org/introduction-to-disjoint-set-data-structure-or-union-find-algorithm/

    Args:
        edge_or_node_to_qspec: Dictionary from edge_or_node to the qspec, derived from annotations
    Returns:
        edge_or_node_to_group_id: Dictionary from edge_or_node to group_id (int), all edge or node that
        belongs to the same group should have the same id

    Example:
        op2 -> cat1 -> cat2
         op1 /        /
                   op3
        edge_or_node_to_qspec: {
            op1: int8_qspec,
            op2: int8_qspec,
            (op1, cat1): int8_qspc,
            (op2, cat1): SharedQuantizationSpec((op1, cat1)),
            cat1: SharedQuantizationSpec((op1, cat1)),
            (op3, cat2): int8_qspec,
            (cat1, cat2): SharedQuantizationSpec((op3, cat2)),
            cat2: SharedQuantizationSpec((op3, cat2)),
        }

        edge_or_node_to_group_id = _get_edge_or_node_to_group_id(edge_or_node_to_qspec)
        edge_or_node_to_group_id: {
            op1: 1,
            op2: 1,
            (op1, cat1): 1,
            (op2, cat1): 1,
            cat1: 1,
            (op3, cat2): 1,
            (cat1, cat2): 1,
            cat2: 1,
        }
        # everything are in the same group because (cat1) and (cat1, cat2) are implicitly shared, which
        # connects the two sharing group around cat1 and cat2 op due to transitive sharing
    """
    # means the observer of key should be shared with observer with value, by default it will
    # be shared with itself (every entry starts as its own root)
    shared_with_map: Dict[EdgeOrNode, EdgeOrNode] = {k: k for k in edge_or_node_to_qspec.keys()}
    for edge_or_node, qspec in edge_or_node_to_qspec.items():
        if isinstance(edge_or_node, torch.fx.Node):
            # plain Node key == the node's output
            output_node = edge_or_node
            _update_shared_with(output_node, qspec, shared_with_map)
        else:
            # tuple key == an (arg, node) input edge
            input_edge = edge_or_node
            input_edge_root_qspec = _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map)

            assert isinstance(input_edge, tuple)
            arg, n = input_edge
            if n.meta["quantization_annotation"].allow_implicit_sharing:
                # NOTE: the order is important here, we first share with other users and then share with previous
                # output because the reverse order could cause circular dependency
                # e.g node1 -> node2
                #          \ -> node3
                # when processing (node1, node2), if we first point (node1, node2) to node1
                # Step 1. shared_map = {(node1, node2): node1}
                # Step 2. after that, we point the (node1, node2) to its other user (node1, node3) ,
                # which means shared_map = {(node1, node2): node1, node1: (node1, node3)}
                # because we will point the root of (node1, node2) (in this case node1) to the root of (node1, node3)
                # Step 3. and when we process (node1, node3), it can try to point to node1 as well, then we'll
                # have a circular dependency
                # the following order works around this issue, but this does not allow arbitrary configuration
                # of sharing so it might break in a different case in the future, when it breaks
                # quantizer writer can check the notes here to debug the issue

                # sharing with other users of the producer node
                # (arg, user)
                if not isinstance(arg, Node) or not isinstance(n, Node):
                    raise Exception(f"Expected input_edge to have type Tuple[Node, Node], but got: {arg, n}")  # noqa: TRY002
                for user in arg.users:
                    if user is n:
                        continue
                    arg_to_user_edge = (arg, user)
                    _union_input_edge_with(
                        input_edge,
                        input_edge_root_qspec,
                        arg_to_user_edge,
                        edge_or_node_to_qspec,
                        shared_with_map
                    )

                # sharing with output of producer node
                _union_input_edge_with(input_edge, input_edge_root_qspec, arg, edge_or_node_to_qspec, shared_with_map)

            _update_shared_with(input_edge, qspec, shared_with_map)

    # now that we get the sharing relations between all edges and nodes, we can assign group ids
    cur_group_id = 0
    edge_or_node_to_group_id: Dict[EdgeOrNode, int] = {}
    for edge_or_node in shared_with_map.keys():
        root = _find_root_edge_or_node(edge_or_node, shared_with_map)
        if root not in edge_or_node_to_group_id:
            # first member of this sharing tree: allocate a fresh group id
            edge_or_node_to_group_id[root] = cur_group_id
            cur_group_id += 1
        edge_or_node_to_group_id[edge_or_node] = edge_or_node_to_group_id[root]

    return edge_or_node_to_group_id
250
def _get_obs_or_fq_map(
    edge_or_node_to_group_id: Dict[EdgeOrNode, int],
    edge_or_node_to_qspec: Dict[EdgeOrNode, QuantizationSpecBase],
    is_qat: bool
) -> Dict[EdgeOrNode, ObserverOrFakeQuantize]:
    """Build the EdgeOrNode -> observer/fake_quant instance mapping.

    Every edge/node with the same group id receives the exact same
    observer/fake_quant instance, which is what implements quantization
    parameter sharing.
    """
    result: Dict[EdgeOrNode, ObserverOrFakeQuantize] = {}
    instance_per_group: Dict[int, ObserverOrFakeQuantize] = {}
    for edge_or_node, qspec in edge_or_node_to_qspec.items():
        group_id = edge_or_node_to_group_id[edge_or_node]
        if group_id not in instance_per_group:
            # first member of the group: create the shared instance
            # TODO: maybe edge_or_node_to_qspec should be edge_or_node_to_root_qspec, this will simplify
            # the implementation for _create_obs_or_fq_from_qspec
            instance_per_group[group_id] = _create_obs_or_fq_from_qspec(qspec, result, is_qat)
        result[edge_or_node] = instance_per_group[group_id]
    return result
270
def _maybe_insert_input_observer_for_arg_or_kwarg(
    node: Union[Node, Any],
    arg: Argument,
    qconfig: QConfigAny,
    model: torch.nn.Module,
    named_modules: Dict[str, torch.nn.Module],
    obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
    is_qat: bool,
) -> Argument:
    """
    Given a `node` and an `arg`, inserts an input observer between
    `node` and `arg` if necessary.

    Returns the (possibly new) argument to use in place of `arg`: either
    `arg` itself, an already-existing observer node wrapping `arg`, or a
    freshly inserted observer/fake_quant node.
    """
    # for ops such as torch.cat([x0, x1]),
    # traverse through the list and handle each element recursively
    if isinstance(arg, (list, tuple)):
        new_arg_to_return = []
        for inner_arg in arg:
            new_inner_arg = _maybe_insert_input_observer_for_arg_or_kwarg(
                node, inner_arg, qconfig, model, named_modules, obs_or_fq_map, is_qat,
            )
            new_arg_to_return.append(new_inner_arg)
        # preserve the original container type (list vs tuple)
        return type(arg)(new_arg_to_return)

    if not isinstance(arg, Node):
        # non-Node args (literals, None, ...) are never observed
        return arg
    assert isinstance(arg, Node)
    # default (no observer)
    new_arg = arg

    # find the original `arg` node to the current node, skipping inserted observer/fake_quant nodes
    original_arg = arg
    while _is_activation_post_process_node(original_arg, named_modules):
        original_arg = original_arg.args[0]  # type: ignore[assignment]
    assert isinstance(original_arg, Node), f"expect original argument to be a Node, but got: {type(original_arg)}"

    input_edge = (original_arg, node)
    if input_edge not in obs_or_fq_map:
        return new_arg
    # input_edge needs to be observed
    input_edge_obs_or_fq = obs_or_fq_map[input_edge]
    if input_edge_obs_or_fq is None:
        return new_arg

    arg_as_output_obs_or_fq = obs_or_fq_map.get(original_arg, None)
    # the arg is observed as the output and is using the same instance as the input_edge
    # we'll reuse the inserted observer/fake_quant
    if arg_as_output_obs_or_fq is not None and id(arg_as_output_obs_or_fq) == id(input_edge_obs_or_fq):
        return new_arg

    # otherwise, we'll insert a new observer/fake_quant node

    existing_obs_node = None  # NOTE(review): unused; kept as-is
    # skip inserting new observers if the same observer instance is inserted before for another user
    # Example:
    # conv1 -> obs1 -> existing_obs -> conv2
    #                 \ -> conv3
    #
    # instead of inserting new observers we will have:
    # conv1 -> obs1 -> existing_obs -> conv2
    #                 \ -> conv3
    for maybe_obs_node in arg.users.keys():
        if not _is_activation_post_process_node(maybe_obs_node, named_modules):
            continue
        maybe_obs_mod = named_modules[maybe_obs_node.target]  # type: ignore[index]
        if id(maybe_obs_mod) == id(input_edge_obs_or_fq):
            # same instance already inserted for another user: reuse that node
            return maybe_obs_node

    new_arg = _insert_obs_or_fq(arg, input_edge_obs_or_fq, model, named_modules, model.graph)
    return new_arg
341
def _maybe_insert_input_observers_for_node(
    node: Node,
    qconfig: QConfigAny,
    model: torch.nn.Module,
    named_modules: Dict[str, torch.nn.Module],
    obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
    is_qat: bool,
) -> None:
    """
    If needed, inserts observers to the input args and kwargs of `node`.
    Note: modifies `node` inplace.

    For example, if cur_node needs an observer after prev_node, we change from

      prev_node -> cur_node

    To

      prev_node -> obs -> cur_node

    """
    # Look through every input arg. If that arg's target dtype does not
    # match the current node's target dtype, insert an observer.
    new_args = []
    # map from old arg to new arg, used for updating the numeric debug handle map
    remap = {}
    for arg in node.args:
        new_arg = _maybe_insert_input_observer_for_arg_or_kwarg(
            node, arg, qconfig, model, named_modules, obs_or_fq_map, is_qat,
        )
        new_args.append(new_arg)
        # NOTE(review): assumes each arg is hashable (fx container args are
        # immutable_list/immutable_dict) — confirm for exotic arg types
        remap[arg] = new_arg

    if "numeric_debug_handle" in node.meta:

        def remap_fn(x):
            # map an old arg to its observed replacement; identity otherwise
            return remap.get(x, x)

        # keep the numeric debug handle keys pointing at the new (observed) args
        numeric_debug_handle = node.meta["numeric_debug_handle"]
        node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()}

    # Clone has a memory_format kwarg, zeros_like has a pin_memory kwarg, and
    # gelu has an approximate kwarg that persist in exported graph.
    # This is just a work around for these.
    assert (
        node.target == torch.ops.aten.clone.default or
        node.target == torch.ops.aten.zeros_like.default or
        node.target == torch.ops.aten.gelu.default or
        len(node.kwargs) == 0
    ), " expecting kwargs for aten op IR to be empty"

    # assign the new args to the node, inplace
    node.args = tuple(new_args)
395
def _maybe_insert_output_observer_for_node(
    node: Node,
    model: torch.nn.Module,
    named_modules: Dict[str, torch.nn.Module],
    graph: Graph,
    obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
    is_qat: bool,
) -> Optional[Node]:
    """Insert an observer/fake_quant after `node`'s output when one is assigned.

    Returns the newly inserted observer node, or None when `node` has no
    entry in `obs_or_fq_map`.
    """
    if node not in obs_or_fq_map:
        return None
    # the node's output has an assigned observer/fake_quant instance
    output_act_obs_or_fq = obs_or_fq_map[node]
    return _insert_obs_or_fq(node, output_act_obs_or_fq, model, named_modules, graph)
408
def _maybe_insert_input_and_output_observers_for_node(
    node: Node,
    model: torch.fx.GraphModule,
    obs_or_fq_map: Dict[EdgeOrNode, ObserverOrFakeQuantize],
    is_qat: bool,
):
    """Insert the input and (if applicable) output observers for one node.

    Nodes without a `quantization_annotation` in their meta are skipped
    entirely; output observation additionally requires the node's `val` meta
    to be a FakeTensor.
    """
    this_node_quantization_annotation = node.meta["quantization_annotation"] if "quantization_annotation" in node.meta else None
    if this_node_quantization_annotation is None:
        return

    # remove_duplicate=False so shared submodule instances keep all their names
    named_modules = dict(model.named_modules(remove_duplicate=False))
    _maybe_insert_input_observers_for_node(
        node,
        None,  # qconfig
        model,
        named_modules,
        obs_or_fq_map,
        is_qat,
    )

    # only observe outputs that are single tensors (per the `val` meta)
    output_is_a_tensor = "val" in node.meta and isinstance(node.meta["val"], FakeTensor)
    if not output_is_a_tensor:
        return

    # this returns the new observer node if it was needed
    maybe_output_obs_node = _maybe_insert_output_observer_for_node(
        node, model, named_modules, model.graph, obs_or_fq_map, is_qat)

    if maybe_output_obs_node is None:
        return
    # Update users of original node to use the output observer
    # instead. For example, change
    #
    #           next_node
    #          /
    #   cur_node -> obs
    #
    # to
    #
    #                 next_node
    #                 /
    #   cur_node -> obs
    #
    # We need to save orig users before updating uses because
    # the list of users will change as we update uses
    orig_users = list(node.users.keys())
    for user_node in orig_users:
        # the observer itself must keep consuming the original node
        if user_node is maybe_output_obs_node:
            continue
        user_node.replace_input_with(node, maybe_output_obs_node)
459
def prepare(
    model: GraphModule,
    node_name_to_scope: Dict[str, Tuple[str, type]],
    is_qat: bool,
) -> GraphModule:
    """Insert observers/fake_quants into `model` according to its
    `quantization_annotation` node metas, and record the prepare state on the
    returned GraphModule.

    Args:
        model: annotated fx GraphModule to prepare
        node_name_to_scope: map from node name to (module path, module type),
            forwarded to `_save_state`
        is_qat: whether to create QAT-style (fake quant) instances

    Returns:
        the observed GraphModule (recompiled in place over the same graph)
    """
    # Since we are mutating the graph as we go, we iterate over the original
    # nodes before observer insertion, instead of model.graph.nodes.
    nodes_before_observation = list(model.graph.nodes)

    # At the high level we construct a map from EdgeOrNode to a observer_or_fake_quant instance
    # all edge/nodes that belongs to the same group will use the same instance
    # and when we insert observers we'll just query this map to get the correct observer_or_fake_quant
    # instance
    edge_or_node_to_qspec = _get_edge_or_node_to_qspec(model)
    edge_or_node_to_group_id = _get_edge_or_node_to_group_id(edge_or_node_to_qspec)
    obs_or_fq_map = _get_obs_or_fq_map(edge_or_node_to_group_id, edge_or_node_to_qspec, is_qat)

    for node in nodes_before_observation:
        # TODO: simplify logic for inserting observers
        _maybe_insert_input_and_output_observers_for_node(node, model, obs_or_fq_map, is_qat)

    # recompile so the inserted observer calls become part of the module code
    model = GraphModule(model, model.graph)

    _save_state(
        model,
        {},  # node_name_to_qconfig
        node_name_to_scope,
        PrepareCustomConfig(),
        {},  # equalization_node_name_to_qconfig
        QConfigMapping(),
        is_qat,
        set()  # observed_node_names
    )
    return model
parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/qat_utils.py ADDED
@@ -0,0 +1,808 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import dataclasses
3
+ import itertools
4
+ import operator
5
+ from typing import Any, Callable, Dict, List, Tuple, TYPE_CHECKING
6
+
7
+ import torch
8
+ from torch.fx import Graph, GraphModule, Node
9
+ from torch.fx.subgraph_rewriter import (
10
+ replace_pattern_with_filters,
11
+ ReplacedPatterns,
12
+ )
13
+ import torch.nn.functional as F
14
+ from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401
15
+ from torch.ao.quantization.pt2e.export_utils import _WrapperModule
16
+ from torch.ao.quantization.quantizer import (
17
+ DerivedQuantizationSpec,
18
+ EdgeOrNode,
19
+ SharedQuantizationSpec,
20
+ QuantizationSpecBase,
21
+ )
22
+ from .utils import (
23
+ _conv1d_bn_example_inputs,
24
+ _conv2d_bn_example_inputs,
25
+ _is_bn_node,
26
+ _is_conv_or_conv_transpose_node,
27
+ _is_conv_transpose_fn,
28
+ fold_bn_weights_into_conv_node,
29
+ _get_aten_graph_module_for_pattern,
30
+ )
31
+
32
+ if TYPE_CHECKING:
33
+ from torch.fx.passes.utils.matcher_with_name_node_map_utils import InternalMatch
34
+
35
+ __all__ = [] # type: ignore[var-annotated]
36
+
37
+
38
# Example inputs for quantized and folded conv-bn1d patterns used in convert
_quantized_conv1d_bn_example_inputs = (
    torch.randn(1, 1, 3),  # x
    torch.randn(1, 1, 1),  # conv_weight
    torch.randn(1),        # bn_weight
    torch.randn(1),        # bn_bias
    torch.randn(1),        # bn_running_mean
    torch.randn(1),        # bn_running_var
)

# Example inputs for quantized and folded conv-bn2d patterns used in convert
_quantized_conv2d_bn_example_inputs = (
    torch.randn(1, 1, 3, 3),  # x
    torch.randn(1, 1, 1, 1),  # conv_weight
    torch.randn(1),           # bn_weight
    torch.randn(1),           # bn_bias
    torch.randn(1),           # bn_running_mean
    torch.randn(1),           # bn_running_var
)
+
58
+
59
+ def _get_quantized_conv_bn_example_inputs_kwargs(
60
+ is_per_channel: bool,
61
+ has_bias: bool,
62
+ bias_is_quantized: bool,
63
+ is_cuda: bool,
64
+ ) -> Dict[str, Any]:
65
+ """
66
+ Optional example inputs for quantized and folded conv-bn patterns
67
+ used in convert, expressed as kwargs.
68
+ """
69
+ kwargs = {}
70
+ # Per tensor quantization uses literals to represent scale and zero
71
+ # point, so there is no need to include them here as kwargs
72
+ if is_per_channel:
73
+ kwargs["weight_scale"] = torch.tensor([1], dtype=torch.float)
74
+ kwargs["weight_zero_point"] = torch.tensor([0], dtype=torch.int)
75
+ if has_bias and bias_is_quantized:
76
+ kwargs["bias_scale"] = torch.tensor([1], dtype=torch.float)
77
+ kwargs["bias_zero_point"] = torch.tensor([0], dtype=torch.int)
78
+ if has_bias:
79
+ kwargs["conv_bias"] = torch.randn(1)
80
+ if is_cuda:
81
+ for k, v in kwargs.items():
82
+ if isinstance(v, torch.Tensor):
83
+ kwargs[k] = v.cuda()
84
+ return kwargs
85
+
86
def _get_conv_bn_pattern(conv_fn: Callable) -> Callable:
    """Return a wrapped pattern computing ``conv_fn`` followed by batch norm.

    ``conv_fn`` is a functional conv (or conv-transpose) callable; the result
    is wrapped in ``_WrapperModule`` for use with the subgraph rewriter.
    """
    def _conv_bn_pattern(
        x: torch.Tensor,
        conv_weight: torch.Tensor,
        conv_bias: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
    ) -> torch.Tensor:
        x = conv_fn(x, conv_weight, conv_bias)
        # training=True so the traced batch_norm matches the training graph
        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True)
        return x
    return _WrapperModule(_conv_bn_pattern)
+
101
# TODO: merge this with the `no_conv_bias` case
def _get_qat_conv_bn_pattern(conv_fn: Callable) -> Callable:
    """Return the QAT-style fused conv + bn pattern for ``conv_fn`` (with conv bias)."""
    def _qat_conv_bn_pattern(
        x: torch.Tensor,
        conv_weight: torch.Tensor,
        conv_bias: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
    ) -> torch.Tensor:
        """
        Approximated method to fuse conv and bn. It requires only one forward pass.
        conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std.
        This is based on `nniqat.ConvBn2d._forward_approximate`.
        """
        # TODO: allow setting eps
        bn_eps = 1e-5
        running_std = torch.sqrt(bn_running_var + bn_eps)
        scale_factor = bn_weight / running_std
        weight_shape = [1] * len(conv_weight.shape)
        # for conv-transpose the weight's channel axis to broadcast the
        # per-channel scale over is 1 instead of 0
        weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0
        weight_shape[weight_in_channel_axis] = -1
        bias_shape = [1] * len(conv_weight.shape)
        bias_shape[1] = -1
        scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
        # run the conv with a zero bias, divide the scale back out, then add
        # the real bias, so the bias is not multiplied by scale_factor
        zero_bias = torch.zeros_like(conv_bias, dtype=x.dtype)
        x = conv_fn(x, scaled_weight, zero_bias)
        x = x / scale_factor.reshape(bias_shape)
        x = x + conv_bias.reshape(bias_shape)
        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps)
        return x
    return _WrapperModule(_qat_conv_bn_pattern)
+
135
def _get_qat_conv_bn_pattern_no_conv_bias(conv_fn: Callable) -> Callable:
    """Return the QAT-style fused conv + bn pattern for ``conv_fn`` (no conv bias)."""
    def _qat_conv_bn_pattern_no_conv_bias(
        x: torch.Tensor,
        conv_weight: torch.Tensor,
        # Not used, only for matching convenience
        conv_bias: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
    ) -> torch.Tensor:
        """
        Same as `_get_qat_conv_bn_pattern`, but handles the case with no conv bias.
        """
        # TODO: allow setting eps
        bn_eps = 1e-5
        running_std = torch.sqrt(bn_running_var + bn_eps)
        scale_factor = bn_weight / running_std
        weight_shape = [1] * len(conv_weight.shape)
        # for conv-transpose the weight's channel axis to broadcast the
        # per-channel scale over is 1 instead of 0
        weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0
        weight_shape[weight_in_channel_axis] = -1
        bias_shape = [1] * len(conv_weight.shape)
        bias_shape[1] = -1
        scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
        # no conv bias, so no zero-bias trick is needed here
        x = conv_fn(x, scaled_weight, None)
        x = x / scale_factor.reshape(bias_shape)
        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True, eps=bn_eps)
        return x
    return _WrapperModule(_qat_conv_bn_pattern_no_conv_bias)
+
165
def _append_qdq(x, is_per_channel, is_bias, kwargs):
    """
    Helper function to append q-dq ops after `x`, using dummy values for the qparams
    and qmin/qmax. We use dummy values here because we match with `ignore_literals=True`
    and will manually replace these values after subgraph rewriting.

    Return the dq node.
    """
    # Dummy args to be passed into q-dq ops
    per_channel_axis = 0
    # per-channel qparams come in through kwargs (tensors); per-tensor ones are
    # literals and get replaced after rewriting
    scale_key = "bias_scale" if is_bias else "weight_scale"
    zp_key = "bias_zero_point" if is_bias else "weight_zero_point"
    scale = kwargs[scale_key] if is_per_channel else 1.0
    zp = kwargs[zp_key] if is_per_channel else 0
    qmin = -127
    qmax = 127
    dtype = torch.int8

    qd = torch.ops.quantized_decomposed
    if is_per_channel:
        x = qd.quantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype)
        x = qd.dequantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype)
    else:
        x = qd.quantize_per_tensor(x, scale, zp, qmin, qmax, dtype)
        x = qd.dequantize_per_tensor(x, scale, zp, qmin, qmax, dtype)
    return x
+
192
def _get_quantized_qat_conv_bn_pattern(
    is_per_channel: bool,
    has_bias: bool,
    bias_is_quantized: bool,
    conv_fn: Callable,
    bn_is_training: bool,
) -> Callable:
    """
    Return the quantized version of QAT conv + BN pattern.
    This is based on `nniqat.ConvBn2d._forward_approximate`,
    used in QAT convert. We first match this pattern and replace
    it with the normal [conv - bn] pattern, then fold the BN
    weights into conv.
    """
    # TODO: allow setting eps
    bn_eps = 1e-5

    def _quantized_qat_conv_bn_pattern(
        x: torch.Tensor,
        conv_weight: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        running_std = torch.sqrt(bn_running_var + bn_eps)
        scale_factor = bn_weight / running_std
        weight_shape = [1] * len(conv_weight.shape)
        # NOTE(review): unlike _get_qat_conv_bn_pattern, the scale is always
        # broadcast along axis 0 here — conv-transpose may not be covered by
        # this pattern; confirm against the callers
        weight_shape[0] = -1
        bias_shape = [1] * len(conv_weight.shape)
        bias_shape[1] = -1
        scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
        # weight fake-quant: q-dq pair with dummy qparams (replaced later)
        scaled_weight = _append_qdq(
            scaled_weight, is_per_channel, is_bias=False, kwargs=kwargs,
        )
        if has_bias:
            # zero-bias trick: real bias is added after dividing out the scale
            zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype)
            if bias_is_quantized:
                zero_bias = _append_qdq(
                    zero_bias, is_per_channel, is_bias=True, kwargs=kwargs,
                )
            x = conv_fn(x, scaled_weight, zero_bias)
        else:
            x = conv_fn(x, scaled_weight, None)
        x = x / scale_factor.reshape(bias_shape)
        if has_bias:
            x = x + kwargs["conv_bias"].reshape(bias_shape)
        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=bn_is_training, eps=bn_eps)
        return x
    return _WrapperModule(_quantized_qat_conv_bn_pattern)
+
244
def _get_folded_quantized_qat_conv_bn_pattern(
    is_per_channel: bool,
    has_bias: bool,
    bias_is_quantized: bool,
    conv_fn: Callable,
    bn_is_training: bool,
) -> Callable:
    """
    Quantized QAT conv - bn pattern with bn weights being folded into conv.
    This is the replacement pattern used in convert, before the BN weights
    are folded into the conv node.
    """
    # TODO: allow setting eps
    bn_eps = 1e-5

    def _folded_quantized_qat_conv_bn_pattern(
        x: torch.Tensor,
        conv_weight: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        # weight fake-quant: q-dq pair with dummy qparams (replaced later)
        conv_weight = _append_qdq(
            conv_weight, is_per_channel, is_bias=False, kwargs=kwargs,
        )
        if has_bias:
            bias = kwargs["conv_bias"]
            if bias_is_quantized:
                bias = _append_qdq(
                    bias, is_per_channel, is_bias=True, kwargs=kwargs,
                )
        else:
            bias = None
        x = conv_fn(x, conv_weight, bias)
        x = F.batch_norm(x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=bn_is_training, eps=bn_eps)
        return x
    return _WrapperModule(_folded_quantized_qat_conv_bn_pattern)
+
282
def _has_conv_bias_filter(
    match: "InternalMatch",
    original_graph: Graph,
    pattern_graph: Graph,
) -> bool:
    """
    Match filter for the subgraph rewriter that returns True if the conv node in
    the original graph has bias.
    """
    conv_node = next(
        (n for n in match.nodes_map.values() if _is_conv_or_conv_transpose_node(n)),
        None,
    )
    if conv_node is None:
        raise ValueError("Could not find conv node in matched conv + bn pattern")
    # the bias is the third positional conv arg, when present and non-None
    return len(conv_node.args) > 2 and conv_node.args[2] is not None
+
296
def _no_conv_bias_filter(
    match: "InternalMatch",
    original_graph: Graph,
    pattern_graph: Graph,
) -> bool:
    """
    Match filter for the subgraph rewriter that returns True if the conv node in
    the original graph does NOT have bias (the negation of `_has_conv_bias_filter`).
    """
    has_bias = _has_conv_bias_filter(match, original_graph, pattern_graph)
    return not has_bias
+
307
+ def _is_quantize(n: Node) -> bool:
308
+ return n.target in [
309
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
310
+ torch.ops.quantized_decomposed.quantize_per_tensor.tensor,
311
+ torch.ops.quantized_decomposed.quantize_per_channel.default,
312
+ ]
313
+
314
+ def _is_dequantize(n: Node) -> bool:
315
+ return n.target in [
316
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
317
+ torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
318
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
319
+ ]
320
+
321
def _get_conv_bn_pattern_nodes(r: ReplacedPatterns) -> Dict[str, Tuple[Node, Node]]:
    """
    Helper function to extract the nodes in the conv-bn fusion pattern after
    subgraph rewriting, in the form of a map:

        {name: (original_node, replacement_node)}

    The following names must exist in the map:

        "conv", "conv_weight", "conv_input", "bn", "getitem"

    The following names may exist in the map:

        "conv_weight_q", "conv_weight_dq", "conv_bias",
        "conv_bias_q", "conv_bias_dq"
    """
    # Naming convention below: `o_*` = node in the original graph,
    # `r_*` = node in the replacement graph, `p_*` = node in the match pattern.
    def _get_nodes(nodes: List[Node]) -> Tuple[Node, Node, Node]:
        """
        Return a 3-tuple of (conv_node, bn_node, getitem_node).
        This asserts that the match contains exactly one of each node.
        """
        conv_node, bn_node, getitem_node = None, None, None
        for n in nodes:
            if n.op != "call_function":
                continue
            if _is_conv_or_conv_transpose_node(n):
                assert conv_node is None
                conv_node = n
            if _is_bn_node(n):
                assert bn_node is None
                bn_node = n
            if n.target == operator.getitem:
                assert getitem_node is None
                getitem_node = n
        assert conv_node is not None
        assert bn_node is not None
        assert getitem_node is not None
        return (conv_node, bn_node, getitem_node)

    def _get_q_dq_nodes(n: Node) -> Tuple[Node, Node, Node]:
        """
        Return a 3-tuple of (orig_node, q_node, dq_node).
        """
        # `n` must be a dequantize node; walk back through its quantize
        # producer to the original (unquantized) node.
        assert _is_dequantize(n)
        q_node = n.args[0]
        assert isinstance(q_node, Node)
        assert _is_quantize(q_node)
        orig_node = q_node.args[0]
        assert isinstance(orig_node, Node)
        return (orig_node, q_node, n)

    original_nodes = list(_filter_nodes_map(r.nodes_map).values())
    o_conv, o_bn, o_getitem = _get_nodes(original_nodes)
    r_conv, r_bn, r_getitem = _get_nodes(r.replacements)

    # Create the mapping from original node to replacement node
    mapping = {
        "conv": (o_conv, r_conv),
        "bn": (o_bn, r_bn),
        "getitem": (o_getitem, r_getitem),
    }

    # Extract conv input and weight
    # Note: here we extract the original nodes indirectly through the pattern nodes
    # because the args of the original nodes are no longer available after replacement
    (p_conv, _, _) = _get_nodes(list(r.nodes_map.keys()))
    (p_conv_input, p_conv_weight, *_) = p_conv.args
    (r_conv_input, r_conv_weight, *_) = r_conv.args
    assert isinstance(p_conv_input, Node)
    assert isinstance(p_conv_weight, Node)
    assert isinstance(r_conv_input, Node)
    assert isinstance(r_conv_weight, Node)
    o_conv_input = r.nodes_map[p_conv_input]
    o_conv_weight = r.nodes_map[p_conv_weight]

    # If conv weight is quantized, extract the q - dq nodes
    if _is_dequantize(p_conv_weight):
        p_conv_weight, p_conv_weight_q, p_conv_weight_dq = _get_q_dq_nodes(p_conv_weight)
        r_conv_weight, r_conv_weight_q, r_conv_weight_dq = _get_q_dq_nodes(r_conv_weight)
        # Re-resolve the original weight node now that we unwrapped the q - dq pair
        o_conv_weight = r.nodes_map[p_conv_weight]
        o_conv_weight_q = r.nodes_map[p_conv_weight_q]
        o_conv_weight_dq = r.nodes_map[p_conv_weight_dq]
        mapping["conv_weight_q"] = (o_conv_weight_q, r_conv_weight_q)
        mapping["conv_weight_dq"] = (o_conv_weight_dq, r_conv_weight_dq)
    mapping["conv_input"] = (o_conv_input, r_conv_input)
    mapping["conv_weight"] = (o_conv_weight, r_conv_weight)

    # Extract conv bias
    if len(p_conv.args) > 2 and len(r_conv.args) > 2:
        p_conv_bias = p_conv.args[2]
        r_conv_bias = r_conv.args[2]
        assert isinstance(p_conv_bias, Node)
        assert isinstance(r_conv_bias, Node)
        o_conv_bias = r.nodes_map[p_conv_bias]

        # If conv bias is quantized, extract the q - dq nodes
        if _is_dequantize(p_conv_bias):
            p_conv_bias, p_conv_bias_q, p_conv_bias_dq = _get_q_dq_nodes(p_conv_bias)
            r_conv_bias, r_conv_bias_q, r_conv_bias_dq = _get_q_dq_nodes(r_conv_bias)
            o_conv_bias = r.nodes_map[p_conv_bias]
            o_conv_bias_q = r.nodes_map[p_conv_bias_q]
            o_conv_bias_dq = r.nodes_map[p_conv_bias_dq]
            mapping["conv_bias_q"] = (o_conv_bias_q, r_conv_bias_q)
            mapping["conv_bias_dq"] = (o_conv_bias_dq, r_conv_bias_dq)
        mapping["conv_bias"] = (o_conv_bias, r_conv_bias)
    return mapping
427
+
428
+ def _filter_nodes_map(nodes_map: Dict[Node, Node]) -> Dict[Node, Node]:
429
+ """
430
+ Return a filtered `nodes_map` returned from the subgraph rewriter.
431
+ The filtered `nodes_map` will contain only nodes that are actually
432
+ matched in the pattern, excluding None or placeholder nodes.
433
+ """
434
+ new_nodes_map: Dict[Node, Node] = {}
435
+ for pattern_node, graph_node in nodes_map.items():
436
+ # bias can be None
437
+ if graph_node is None:
438
+ continue
439
+ # skip pattern placeholder nodes
440
+ if pattern_node.op == "placeholder":
441
+ continue
442
+ new_nodes_map[pattern_node] = graph_node
443
+ return new_nodes_map
444
+
445
# TODO: this is error prone, use the replace_literals_with_placeholders hack instead
def _copy_over_literal_conv_args(original_node: Node, new_node: Node):
    """
    Copy the literal (non-tensor) conv args, such as stride and padding, from
    the matched node in the original graph onto its replacement in the new graph.

    This is needed due to a limitation in the subgraph rewriter when used with
    dynamo export: literal args are not supported in the match and replacement
    patterns, because dynamo export automatically inlines them, leaving dead
    placeholder nodes. In the future, we should check if dynamo export can
    optionally disable this inlining, or if subgraph rewriter can do the
    copying for us. See https://github.com/pytorch/pytorch/issues/100419.

    Note: Unlike other tensor args like conv weights and biases, literal args
    are preserved in the original nodes after replacement, so we can read them here.
    """
    assert _is_conv_or_conv_transpose_node(original_node)
    assert _is_conv_or_conv_transpose_node(new_node)
    # Conv args layout:
    #   x, weight, bias, [stride, padding, dilation, transposed, output_padding, groups]
    # Keep the replacement's first three (tensor) args, then splice in the
    # original's literal tail.
    head = list(new_node.args[:3])
    if len(head) < 3:
        # bias is optional; an absent bias arg is equivalent to an explicit None
        head.append(None)
    new_node.args = tuple(head) + original_node.args[3:]
469
+
470
def _update_conv_input_qspec_map_after_replacement(original_node: Node, replacement_node: Node):
    """
    Rewrite the keys of the conv node's `input_qspec_map` annotation after
    subgraph rewriting.

    The original annotation was keyed by nodes of the original graph; after
    replacement the map must instead be keyed by the corresponding args of
    the replacement conv node.
    """
    assert _is_conv_or_conv_transpose_node(original_node)
    assert _is_conv_or_conv_transpose_node(replacement_node)
    if "quantization_annotation" not in original_node.meta:
        return
    # The qspecs are assumed to be ordered as (input, weight, bias).
    # note: this is really hacky, we need a better solution, hopefully
    # in subgraph_rewriter, issue tracking the problem: https://github.com/pytorch/pytorch/issues/101820
    old_map = original_node.meta["quantization_annotation"].input_qspec_map
    qspecs = [qspec for _, qspec in old_map.items()]
    new_map = {
        replacement_node.args[0]: qspecs[0],  # input activation
        replacement_node.args[1]: qspecs[1],  # weight
    }
    if len(replacement_node.args) > 2 and len(qspecs) > 2:
        new_map[replacement_node.args[2]] = qspecs[2]  # bias
    replacement_node.meta["quantization_annotation"].input_qspec_map = new_map
496
+
497
def _update_special_qspecs_after_replacement(
    node: Node,
    original_to_replacement_node: Dict[Node, Node],
):
    """
    Update the `SharedQuantizationSpec`s and `DerivedQuantizationSpec`s
    used in `node`'s quantization annotation after subgraph rewriting.

    The original annotation referred to the nodes in the original graph,
    so the nodes used in these special quantization specs will need to
    be updated to the corresponding nodes in the replacement graph.
    """
    def _get_new_edge_or_node(edge_or_node: EdgeOrNode):
        # An EdgeOrNode is either a single Node or a (src, dest) Node pair;
        # map each node through the replacement table, defaulting to itself
        # when it was not part of a replaced subgraph.
        if isinstance(edge_or_node, Node):
            _node = edge_or_node
            return original_to_replacement_node.get(_node, _node)
        elif isinstance(edge_or_node, tuple) and len(edge_or_node) == 2 and all(isinstance(x, Node) for x in edge_or_node):
            src, dest = edge_or_node
            return (
                original_to_replacement_node.get(src, src),
                original_to_replacement_node.get(dest, dest),
            )
        else:
            raise ValueError("unexpected type for edge_or_node: ", type(edge_or_node))

    def _get_new_qspec(qspec: QuantizationSpecBase):
        # Only Shared/Derived specs embed node references; any other qspec
        # kind is returned unchanged.
        if isinstance(qspec, SharedQuantizationSpec):
            new_edge_or_node = _get_new_edge_or_node(qspec.edge_or_node)
            return SharedQuantizationSpec(new_edge_or_node)
        elif isinstance(qspec, DerivedQuantizationSpec):
            new_derived_from = [_get_new_edge_or_node(x) for x in qspec.derived_from]
            return dataclasses.replace(qspec, derived_from=new_derived_from)
        else:
            return qspec

    if "quantization_annotation" not in node.meta:
        return
    annotation = node.meta["quantization_annotation"]
    # Rewrite specs in place, both for inputs and for the output.
    for input_node, qspec in annotation.input_qspec_map.items():
        annotation.input_qspec_map[input_node] = _get_new_qspec(qspec)
    annotation.output_qspec = _get_new_qspec(annotation.output_qspec)
538
+
539
def _fuse_conv_bn_qat(m: GraphModule) -> GraphModule:
    """
    Replace every (conv + bn) pattern in `m` with its fused QAT subgraph,
    covering 1d/2d conv and conv-transpose variants, on CPU and (when
    available) CUDA.
    """
    if not any(_is_bn_node(n) for n in m.graph.nodes):
        # No batchnorm nodes at all, nothing to fuse
        return m
    cuda_options = [True, False] if torch.cuda.is_available() else [False]
    conv_variants = [
        (F.conv1d, _conv1d_bn_example_inputs),
        (F.conv2d, _conv2d_bn_example_inputs),
        (F.conv_transpose1d, _conv1d_bn_example_inputs),
        (F.conv_transpose2d, _conv2d_bn_example_inputs),
    ]
    for is_cuda in cuda_options:
        for conv_fn, example_inputs in conv_variants:
            m = _fuse_conv_bn_qat_helper(m, conv_fn, example_inputs, is_cuda=is_cuda)
    return m
550
+
551
def _fuse_conv_bn_qat_helper(
    m: GraphModule,
    conv_fn: Callable,
    example_inputs: Tuple[Any, ...],
    is_cuda: bool,
) -> GraphModule:
    """
    Given a graph of decomposed aten ops, replace the (conv + bn) pattern with
    the fused QAT subgraph equivalent. The input graph should already be annotated.
    The annotations in the original nodes will be preserved in the corresponding
    nodes in the new subgraph.

    Note: This also handles the (conv + bn + relu) pattern.
    """
    m.graph.eliminate_dead_code()
    m.recompile()
    conv_bn_pattern = _get_conv_bn_pattern(conv_fn)
    match_pattern = _get_aten_graph_module_for_pattern(conv_bn_pattern, example_inputs, is_cuda)

    # Step (1): Replace patterns with conv bias
    #
    # Here we do replacement separately for cases with and without conv bias, since
    # the replacement patterns for these two cases are substantially different.
    # TODO: use the public replace_pattern API once it also returns replacement nodes

    qat_conv_bn_pattern = _get_qat_conv_bn_pattern(conv_fn)
    replacement_pattern_with_conv_bias = _get_aten_graph_module_for_pattern(
        qat_conv_bn_pattern,
        example_inputs,
        is_cuda,
    )
    replacements_with_conv_bias = replace_pattern_with_filters(
        m,
        match_pattern,
        replacement_pattern_with_conv_bias,
        match_filters=[_has_conv_bias_filter],
        ignore_literals=True,
    )
    m.recompile()

    # Step (2): Replace patterns without conv bias

    qat_conv_bn_pattern_no_conv_bias = _get_qat_conv_bn_pattern_no_conv_bias(conv_fn)
    replacement_pattern_no_conv_bias = _get_aten_graph_module_for_pattern(
        qat_conv_bn_pattern_no_conv_bias,
        example_inputs,
        is_cuda,
    )
    replacements_no_conv_bias = replace_pattern_with_filters(
        m,
        match_pattern,
        replacement_pattern_no_conv_bias,
        match_filters=[_no_conv_bias_filter],
        ignore_literals=True,
    )
    m.recompile()

    # Step (3): Post processing
    #
    # Due to limited functionality in the subgraph rewriter, here we manually
    # update the replacement graph as follows:
    #
    #   (a) Copy over metadata from original subgraph. This ensures the stack traces
    #       and annotations are preserved in the new subgraph
    #
    #   (b) Copy over literal args for conv from the original subgraph
    #       TODO: do this for literal args for batchnorm as well
    #
    #   (c) Update all references of the old nodes in the original subgraph to refer
    #       to the corresponding nodes in the new subgraph in the annotations
    #
    # In the future, we should try to push as much of this functionality into the
    # subgraph rewriter as possible, so we don't have to manually copy anything over.
    # For more detail, see https://github.com/pytorch/pytorch/issues/100419.

    all_original_to_replacement_nodes = {}
    for r in replacements_with_conv_bias + replacements_no_conv_bias:
        for original_node, replacement_node in _get_conv_bn_pattern_nodes(r).values():
            # Step (3a): Copy over metadata for all nodes in [conv - bn - getitem]
            replacement_node.meta = original_node.meta
            if _is_conv_or_conv_transpose_node(original_node):
                # Step (3b): Copy over conv literal args
                _copy_over_literal_conv_args(original_node, replacement_node)
                # Step (3c): Update old references in the conv node's input_qspec_map
                _update_conv_input_qspec_map_after_replacement(original_node, replacement_node)
            all_original_to_replacement_nodes[original_node] = replacement_node

    # Step (3c): Update old references in the special qspecs for all nodes in the graph
    for n in m.graph.nodes:
        _update_special_qspecs_after_replacement(n, all_original_to_replacement_nodes)

    return m
643
+
644
def _duplicate_dequantize_node(m: GraphModule):
    """
    Helper function to duplicate all dequantize nodes in the graph if the
    node has more than one user. For example:

    Before:
      quantize -> dequantize -> a
                          \\--> b
                          \\--> c

    After:
      quantize -> dequantize_1 -> a
            \\--> dequantize_2 -> b
            \\--> dequantize_3 -> c

    This is useful for subgraph rewriting. E.g. if we wish to match the
    pattern [dequantize - a] above, subgraph matching would fail because
    the dequantize node has users outside the matched portion of the graph.
    Instead, we match [dequantize_1 - a], which is safe.
    """
    # NOTE(review): `dq_op` is the OpOverloadPacket (no `.default` suffix),
    # so `n.target != dq_op` only matches nodes whose target is the packet
    # itself, not a specific overload — confirm that is intended.
    dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor
    for n in m.graph.nodes:
        # Only duplicate dequantize calls that are shared by multiple users
        if n.op != "call_function" or n.target != dq_op or len(n.users) == 1:
            continue
        # Snapshot the users first: rewiring mutates `n.users` as we go
        for user in list(n.users):
            # Give this user a private copy of the dequantize node and
            # rewire the user to consume the copy instead of the original
            with m.graph.inserting_before(n):
                new_node = m.graph.create_node("call_function", dq_op, n.args, n.kwargs)
            user.replace_input_with(n, new_node)
        # All users now point at their private copies; drop the shared node
        m.graph.erase_node(n)
    m.recompile()
674
+
675
def _remove_extra_dequantize(m: GraphModule):
    """
    Removes duplicate dequant nodes in the graph, for an operator that has
    multiple dequant nodes as a user, replace them with a single dequant node
    that can be shared across all the uses. This should be seen as the "reverse"
    of `_duplicate_dequantize_node`.
    """
    dq_op = torch.ops.quantized_decomposed.dequantize_per_tensor
    for n in m.graph.nodes:
        # Collect all dequantize users of this node
        dq_users = [user for user in n.users if user.op == "call_function" and user.target == dq_op]
        if len(dq_users) > 1:
            # Create a single shared dequantize node after the first duplicate,
            # then retarget every duplicate's users to it and erase the duplicates.
            # NOTE(review): the new node is created with empty kwargs; any kwargs
            # on the original dequantize nodes are dropped — confirm these nodes
            # never carry kwargs.
            with m.graph.inserting_after(dq_users[0]):
                new_node = m.graph.create_node("call_function", dq_op, dq_users[0].args, {})
            for dq_user in dq_users:
                dq_user.replace_all_uses_with(new_node)
                m.graph.erase_node(dq_user)
    m.recompile()
692
+
693
+ def _copy_over_q_dq_args(original_node: Node, replacement_node: Node):
694
+ """
695
+ Given a pair of quantize or dequantize nodes, copy over all literal args
696
+ from the original node to the replacement node.
697
+ """
698
+ # For quantize_per_tensor, scale and zp are literals and need to be copied
699
+ # For quantize_per_channel, scale and zp are get_attr nodes and should be skipped
700
+ assert original_node.target == replacement_node.target
701
+ if original_node.target in (
702
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
703
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
704
+ ):
705
+ # Args: input, [scale, zp, qmin, qmax, dtype]
706
+ start_copy_arg_index = 1
707
+ elif original_node.target in (
708
+ torch.ops.quantized_decomposed.quantize_per_channel.default,
709
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
710
+ ):
711
+ # Args: input, scale, zp, [axis, qmin, qmax, dtype]
712
+ start_copy_arg_index = 3
713
+ else:
714
+ raise ValueError(f"Expected quantize/dequantize nodes, got '{original_node.target}'")
715
+ replacement_node.args = (
716
+ replacement_node.args[:start_copy_arg_index] + original_node.args[start_copy_arg_index:]
717
+ )
718
+
719
def _fold_conv_bn_qat(m: GraphModule) -> GraphModule:
    """
    Fold BN weights into conv for every quantized (conv + bn) pattern in `m`,
    covering 1d/2d conv and conv-transpose variants, on CPU and (when
    available) CUDA.
    """
    if not any(_is_bn_node(n) for n in m.graph.nodes):
        # No batchnorm nodes at all, nothing to fold
        return m
    cuda_options = [True, False] if torch.cuda.is_available() else [False]
    conv_variants = [
        (F.conv1d, _quantized_conv1d_bn_example_inputs),
        (F.conv2d, _quantized_conv2d_bn_example_inputs),
        (F.conv_transpose1d, _quantized_conv1d_bn_example_inputs),
        (F.conv_transpose2d, _quantized_conv2d_bn_example_inputs),
    ]
    for is_cuda in cuda_options:
        for conv_fn, example_inputs in conv_variants:
            m = _fold_conv_bn_qat_helper(m, conv_fn, example_inputs, is_cuda=is_cuda)
    return m
730
+
731
def _fold_conv_bn_qat_helper(
    m: GraphModule,
    conv_fn: Callable,
    example_inputs: Tuple[Any, ...],
    is_cuda: bool,
) -> GraphModule:
    """
    Replace the quantized (conv + bn) pattern with conv with bn weights folded into the weights of conv.
    """
    m.graph.eliminate_dead_code()
    m.recompile()
    # Give each dequantize user its own copy so subgraph matching succeeds
    # (see `_duplicate_dequantize_node`); undone by `_remove_extra_dequantize`.
    _duplicate_dequantize_node(m)

    # Step (1): Replace QAT pattern with simple [conv - bn] pattern
    replacements = []
    # Enumerate every pattern variant; each flag changes the matched subgraph
    replacement_options = itertools.product(
        [True, False],  # is_per_channel
        [True, False],  # has_bias
        [True, False],  # bias_is_quantized
        [True, False],  # bn_is_training
    )
    for is_per_channel, has_bias, bias_is_quantized, bn_is_training in replacement_options:
        # For the cases without bias, `bias_is_quantized` is irrelevant, so here we arbitrarily
        # filter out one of the values for this flag to avoid having duplicate patterns
        if not has_bias and bias_is_quantized:
            continue
        kwargs = _get_quantized_conv_bn_example_inputs_kwargs(is_per_channel, has_bias, bias_is_quantized, is_cuda)
        match_pattern = _get_quantized_qat_conv_bn_pattern(
            is_per_channel, has_bias, bias_is_quantized, conv_fn, bn_is_training
        )
        match_pattern = _get_aten_graph_module_for_pattern(match_pattern, example_inputs, is_cuda, **kwargs)
        replacement_pattern = _get_folded_quantized_qat_conv_bn_pattern(
            is_per_channel, has_bias, bias_is_quantized, conv_fn, bn_is_training
        )
        replacement_pattern = _get_aten_graph_module_for_pattern(replacement_pattern, example_inputs, is_cuda, **kwargs)
        replacements.extend(
            replace_pattern_with_filters(
                m,
                match_pattern,
                replacement_pattern,
                ignore_literals=True,
            )
        )
    m.recompile()
    _remove_extra_dequantize(m)

    for r in replacements:
        node_map = _get_conv_bn_pattern_nodes(r)

        # Step (2): Copy over metadata from original subgraph
        for original_node, replacement_node in node_map.values():
            replacement_node.meta = original_node.meta

        # Step (3): Copy over args for weight (and optionally bias) q - dq nodes
        _copy_over_q_dq_args(*node_map["conv_weight_q"])
        _copy_over_q_dq_args(*node_map["conv_weight_dq"])
        if "conv_bias_q" in node_map:
            assert "conv_bias_dq" in node_map
            _copy_over_q_dq_args(*node_map["conv_bias_q"])
            _copy_over_q_dq_args(*node_map["conv_bias_dq"])

        # Step (4): Fold BN weights into conv
        conv_bias = None
        (_, conv_node) = node_map["conv"]
        (_, bn_node) = node_map["bn"]
        (_, conv_weight) = node_map["conv_weight"]
        if "conv_bias" in node_map:
            (_, conv_bias) = node_map["conv_bias"]
        fold_bn_weights_into_conv_node(conv_node, conv_weight, conv_bias, bn_node, m)

        # Copy over literal args for conv
        for original_node in _filter_nodes_map(r.nodes_map).values():
            if _is_conv_or_conv_transpose_node(original_node):
                _copy_over_literal_conv_args(original_node, conv_node)

    m.graph.eliminate_dead_code()
    m.recompile()
    return m