ZTWHHH committed on
Commit
e4dcac0
·
verified ·
1 Parent(s): 121f0ee

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
  2. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py +41 -0
  3. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
  5. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py +245 -0
  6. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py +1 -0
  7. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py +32 -0
  9. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  13. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py +1050 -0
  14. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py +193 -0
  15. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py +51 -0
  16. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py +15 -0
  17. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py +1 -0
  19. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py +6 -0
  21. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  22. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py +60 -0
  24. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  25. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
  26. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py +105 -0
  29. videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py +187 -0
  30. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py +1 -0
  31. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py +1 -0
  33. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  34. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py +4 -0
  35. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  37. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py +35 -0
  38. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__init__.py +13 -0
  39. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  40. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/conv.cpython-310.pyc +0 -0
  41. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/linear.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/conv.py +310 -0
  44. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/embedding_ops.py +248 -0
  45. videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/linear.py +96 -0
  46. videochat2/lib/python3.10/site-packages/torch/ao/ns/__init__.py +0 -0
  47. videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc +0 -0
  48. videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  49. videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  50. videochat2/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py +563 -0
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (738 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Re-export the fused container modules defined in `.fused` so they are
# importable directly from this package.
from .fused import (  # noqa: F401
    _FusedModule,
    BNReLU2d,
    BNReLU3d,
    ConvAdd2d,
    ConvAddReLU2d,
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    LinearBn1d,
    LinearLeakyReLU,
    LinearReLU,
    LinearTanh,
)


# Public API: every re-exported container except the private `_FusedModule`
# marker base class.
__all__ = [
    "ConvBn1d",
    "ConvBn2d",
    "ConvBn3d",
    "ConvBnReLU1d",
    "ConvBnReLU2d",
    "ConvBnReLU3d",
    "ConvReLU1d",
    "ConvReLU2d",
    "ConvReLU3d",
    "LinearReLU",
    "BNReLU2d",
    "BNReLU3d",
    "LinearBn1d",
    "LinearLeakyReLU",
    "LinearTanh",
    "ConvAdd2d",
    "ConvAddReLU2d",
]
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (693 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc ADDED
Binary file (8.35 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.nn import (
4
+ BatchNorm1d,
5
+ BatchNorm2d,
6
+ BatchNorm3d,
7
+ Conv1d,
8
+ Conv2d,
9
+ Conv3d,
10
+ Linear,
11
+ ReLU,
12
+ )
13
+ from torch.nn.utils.parametrize import type_before_parametrizations
14
+
15
+
16
+ __all__ = [
17
+ "ConvReLU1d",
18
+ "ConvReLU2d",
19
+ "ConvReLU3d",
20
+ "LinearReLU",
21
+ "ConvBn1d",
22
+ "ConvBn2d",
23
+ "ConvBnReLU1d",
24
+ "ConvBnReLU2d",
25
+ "ConvBn3d",
26
+ "ConvBnReLU3d",
27
+ "BNReLU2d",
28
+ "BNReLU3d",
29
+ "LinearBn1d",
30
+ "LinearLeakyReLU",
31
+ "LinearTanh",
32
+ "ConvAdd2d",
33
+ "ConvAddReLU2d",
34
+ ]
35
+
36
+
37
class _FusedModule(torch.nn.Sequential):
    """Marker base class used to identify intrinsic (fused) modules during quantization."""
40
+
41
+
42
class ConvReLU1d(_FusedModule):
    r"""Container holding a Conv1d followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv1d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{relu_type}"
        super().__init__(conv, relu)
52
+
53
+
54
class ConvReLU2d(_FusedModule):
    r"""Container holding a Conv2d followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv2d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{relu_type}"
        super().__init__(conv, relu)
64
+
65
+
66
class ConvReLU3d(_FusedModule):
    r"""Container holding a Conv3d followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv3d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{relu_type}"
        super().__init__(conv, relu)
76
+
77
+
78
class LinearReLU(_FusedModule):
    r"""Container holding a Linear followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, linear, relu):
        # Compare the pre-parametrization classes so parametrized linears pass too.
        linear_type = type_before_parametrizations(linear)
        relu_type = type_before_parametrizations(relu)
        assert (
            linear_type == Linear and relu_type == ReLU
        ), f"Incorrect types for input modules{linear_type}{relu_type}"
        super().__init__(linear, relu)
88
+
89
+
90
class ConvBn1d(_FusedModule):
    r"""Container holding a Conv1d followed by a BatchNorm1d.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        assert (
            conv_type == Conv1d and bn_type == BatchNorm1d
        ), f"Incorrect types for input modules{conv_type}{bn_type}"
        super().__init__(conv, bn)
100
+
101
+
102
class ConvBn2d(_FusedModule):
    r"""Container holding a Conv2d followed by a BatchNorm2d.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        assert (
            conv_type == Conv2d and bn_type == BatchNorm2d
        ), f"Incorrect types for input modules{conv_type}{bn_type}"
        super().__init__(conv, bn)
112
+
113
+
114
class ConvBnReLU1d(_FusedModule):
    r"""Container holding a Conv1d, a BatchNorm1d, and a ReLU, in that order.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv1d and bn_type == BatchNorm1d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{bn_type}{relu_type}"  # noqa: B950
        super().__init__(conv, bn, relu)
125
+
126
+
127
class ConvBnReLU2d(_FusedModule):
    r"""Container holding a Conv2d, a BatchNorm2d, and a ReLU, in that order.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv2d and bn_type == BatchNorm2d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{bn_type}{relu_type}"  # noqa: B950
        super().__init__(conv, bn, relu)
138
+
139
+
140
class ConvBn3d(_FusedModule):
    r"""Container holding a Conv3d followed by a BatchNorm3d.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        assert (
            conv_type == Conv3d and bn_type == BatchNorm3d
        ), f"Incorrect types for input modules{conv_type}{bn_type}"
        super().__init__(conv, bn)
150
+
151
+
152
class ConvBnReLU3d(_FusedModule):
    r"""Container holding a Conv3d, a BatchNorm3d, and a ReLU, in that order.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, bn, relu):
        # Compare the pre-parametrization classes so parametrized convs pass too.
        conv_type = type_before_parametrizations(conv)
        bn_type = type_before_parametrizations(bn)
        relu_type = type_before_parametrizations(relu)
        assert (
            conv_type == Conv3d and bn_type == BatchNorm3d and relu_type == ReLU
        ), f"Incorrect types for input modules{conv_type}{bn_type}{relu_type}"  # noqa: B950
        super().__init__(conv, bn, relu)
163
+
164
+
165
class BNReLU2d(_FusedModule):
    r"""Container holding a BatchNorm2d followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, batch_norm, relu):
        # Compare the pre-parametrization classes so parametrized modules pass too.
        bn_type = type_before_parametrizations(batch_norm)
        relu_type = type_before_parametrizations(relu)
        assert (
            bn_type == BatchNorm2d and relu_type == ReLU
        ), f"Incorrect types for input modules{bn_type}{relu_type}"
        super().__init__(batch_norm, relu)
175
+
176
+
177
class BNReLU3d(_FusedModule):
    r"""Container holding a BatchNorm3d followed by a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, batch_norm, relu):
        # Compare the pre-parametrization classes so parametrized modules pass too.
        bn_type = type_before_parametrizations(batch_norm)
        relu_type = type_before_parametrizations(relu)
        assert (
            bn_type == BatchNorm3d and relu_type == ReLU
        ), f"Incorrect types for input modules{bn_type}{relu_type}"
        super().__init__(batch_norm, relu)
187
+
188
+
189
class LinearBn1d(_FusedModule):
    r"""Container holding a Linear followed by a BatchNorm1d.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, linear, bn):
        # Compare the pre-parametrization classes so parametrized modules pass too.
        linear_type = type_before_parametrizations(linear)
        bn_type = type_before_parametrizations(bn)
        assert (
            linear_type == Linear and bn_type == BatchNorm1d
        ), f"Incorrect types for input modules{linear_type}{bn_type}"
        super().__init__(linear, bn)
199
+
200
+
201
class LinearLeakyReLU(_FusedModule):
    r"""Container holding a Linear followed by a LeakyReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, linear, leaky_relu):
        # NOTE(review): unlike the conv fusers above, this checks type() directly
        # (not type_before_parametrizations), so parametrized Linears are rejected.
        linear_type = type(linear)
        act_type = type(leaky_relu)
        assert (
            linear_type == Linear and act_type == torch.nn.LeakyReLU
        ), f"Incorrect types for input modules{linear_type}{act_type}"
        super().__init__(linear, leaky_relu)
210
+
211
+
212
class LinearTanh(_FusedModule):
    r"""Container holding a Linear followed by a Tanh.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, linear, tanh):
        # NOTE(review): unlike the conv fusers above, this checks type() directly
        # (not type_before_parametrizations), so parametrized Linears are rejected.
        linear_type = type(linear)
        act_type = type(tanh)
        assert (
            linear_type == Linear and act_type == torch.nn.Tanh
        ), f"Incorrect types for input modules{linear_type}{act_type}"
        super().__init__(linear, tanh)
221
+
222
+
223
class ConvAdd2d(_FusedModule):
    r"""Container pairing a Conv2d with an extra elementwise add.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, add):
        # Only the conv goes into the Sequential; `add` is kept via attribute
        # assignment and applied in forward().
        super().__init__(conv)
        self.add = add

    def forward(self, x1, x2):
        conv_out = self[0](x1)
        return self.add(conv_out, x2)
233
+
234
+
235
class ConvAddReLU2d(_FusedModule):
    r"""Container pairing a Conv2d with an extra elementwise add and a ReLU.

    During quantization this will be replaced with the corresponding fused module.
    """

    def __init__(self, conv, add, relu):
        # Only the conv goes into the Sequential; `add` and `relu` are kept via
        # attribute assignment and applied in forward().
        super().__init__(conv)
        self.add = add
        self.relu = relu

    def forward(self, x1, x2):
        conv_out = self[0](x1)
        return self.relu(self.add(conv_out, x2))
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Re-export the QAT (quantization-aware training) fused modules from their
# implementation files so they are importable directly from this package.
from .conv_fused import (
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    freeze_bn_stats,
    update_bn_stats,
)
from .linear_fused import LinearBn1d
from .linear_relu import LinearReLU


# Public surface of the QAT intrinsic modules package.
__all__ = [
    "LinearReLU",
    "LinearBn1d",
    "ConvReLU1d",
    "ConvReLU2d",
    "ConvReLU3d",
    "ConvBn1d",
    "ConvBn2d",
    "ConvBn3d",
    "ConvBnReLU1d",
    "ConvBnReLU2d",
    "ConvBnReLU3d",
    "update_bn_stats",
    "freeze_bn_stats",
]
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (633 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc ADDED
Binary file (19 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc ADDED
Binary file (4.98 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py ADDED
@@ -0,0 +1,1050 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ from typing import TypeVar
4
+
5
+ import torch
6
+ import torch.ao.nn.intrinsic as nni
7
+ import torch.ao.nn.qat as nnqat
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from torch.nn import init
11
+ from torch.nn.modules.utils import _pair, _single, _triple
12
+ from torch.nn.parameter import Parameter
13
+ from torch.nn.utils import fuse_conv_bn_weights
14
+
15
+
16
__all__ = [
    "ConvBn1d",
    "ConvBnReLU1d",
    "ConvReLU1d",
    "ConvBn2d",
    "ConvBnReLU2d",
    "ConvReLU2d",
    "ConvBn3d",
    "ConvBnReLU3d",
    "ConvReLU3d",
    "update_bn_stats",
    "freeze_bn_stats",
]
# Maps spatial dimensionality (1/2/3) to the matching BatchNorm class used by
# the fused QAT conv-bn modules in this file.
_BN_CLASS_MAP = {
    1: nn.BatchNorm1d,
    2: nn.BatchNorm2d,
    3: nn.BatchNorm3d,
}


# Type variable bound to conv modules; used as the default _FLOAT_MODULE marker.
MOD = TypeVar("MOD", bound=nn.modules.conv._ConvNd)
37
+
38
+
39
+ class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
40
+ _version = 2
41
+ _FLOAT_MODULE = MOD
42
+
43
+ def __init__(
44
+ self,
45
+ # ConvNd args
46
+ in_channels,
47
+ out_channels,
48
+ kernel_size,
49
+ stride,
50
+ padding,
51
+ dilation,
52
+ transposed,
53
+ output_padding,
54
+ groups,
55
+ bias,
56
+ padding_mode,
57
+ # BatchNormNd args
58
+ # num_features: out_channels
59
+ eps=1e-05,
60
+ momentum=0.1,
61
+ # affine: True
62
+ # track_running_stats: True
63
+ # Args for this module
64
+ freeze_bn=False,
65
+ qconfig=None,
66
+ dim=2,
67
+ ):
68
+ nn.modules.conv._ConvNd.__init__(
69
+ self,
70
+ in_channels,
71
+ out_channels,
72
+ kernel_size,
73
+ stride,
74
+ padding,
75
+ dilation,
76
+ transposed,
77
+ output_padding,
78
+ groups,
79
+ False,
80
+ padding_mode,
81
+ )
82
+ assert qconfig, "qconfig must be provided for QAT module"
83
+ self.qconfig = qconfig
84
+ self.freeze_bn = freeze_bn if self.training else True
85
+ self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
86
+ self.weight_fake_quant = self.qconfig.weight()
87
+ if bias:
88
+ self.bias = Parameter(torch.empty(out_channels))
89
+ else:
90
+ self.register_parameter("bias", None)
91
+ self.reset_bn_parameters()
92
+
93
+ # this needs to be called after reset_bn_parameters,
94
+ # as they modify the same state
95
+ if self.training:
96
+ if freeze_bn:
97
+ self.freeze_bn_stats()
98
+ else:
99
+ self.update_bn_stats()
100
+ else:
101
+ self.freeze_bn_stats()
102
+
103
+ self._enable_slow_path_for_better_numerical_stability = False
104
+
105
+ def reset_running_stats(self):
106
+ self.bn.reset_running_stats()
107
+
108
+ def reset_bn_parameters(self):
109
+ self.bn.reset_running_stats()
110
+ init.uniform_(self.bn.weight)
111
+ init.zeros_(self.bn.bias)
112
+ # note: below is actually for conv, not BN
113
+ if self.bias is not None:
114
+ fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
115
+ bound = 1 / math.sqrt(fan_in)
116
+ init.uniform_(self.bias, -bound, bound)
117
+
118
+ def reset_parameters(self):
119
+ super().reset_parameters()
120
+
121
+ def update_bn_stats(self):
122
+ self.freeze_bn = False
123
+ self.bn.training = True
124
+ return self
125
+
126
+ def freeze_bn_stats(self):
127
+ self.freeze_bn = True
128
+ self.bn.training = False
129
+ return self
130
+
131
+ def _forward(self, input):
132
+ if self._enable_slow_path_for_better_numerical_stability:
133
+ return self._forward_slow(input)
134
+ return self._forward_approximate(input)
135
+
136
+ def _forward_approximate(self, input):
137
+ """Approximated method to fuse conv and bn. It requires only one forward pass.
138
+ conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std
139
+ """
140
+ assert self.bn.running_var is not None
141
+ running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
142
+ scale_factor = self.bn.weight / running_std
143
+ weight_shape = [1] * len(self.weight.shape)
144
+ weight_shape[0] = -1
145
+ bias_shape = [1] * len(self.weight.shape)
146
+ bias_shape[1] = -1
147
+ scaled_weight = self.weight_fake_quant(
148
+ self.weight * scale_factor.reshape(weight_shape)
149
+ )
150
+ # using zero bias here since the bias for original conv
151
+ # will be added later
152
+ if self.bias is not None:
153
+ zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
154
+ else:
155
+ zero_bias = torch.zeros(
156
+ self.out_channels, device=scaled_weight.device, dtype=input.dtype
157
+ )
158
+ conv = self._conv_forward(input, scaled_weight, zero_bias)
159
+ conv_orig = conv / scale_factor.reshape(bias_shape)
160
+ if self.bias is not None:
161
+ conv_orig = conv_orig + self.bias.reshape(bias_shape)
162
+ conv = self.bn(conv_orig)
163
+ return conv
164
+
165
+ def _forward_slow(self, input):
166
+ """
167
+ A more accurate but slow method to compute conv bn fusion, following https://arxiv.org/pdf/1806.08342.pdf
168
+ It requires two forward passes but handles the case bn.weight == 0
169
+
170
+ Conv: Y = WX + B_c
171
+ Conv without bias: Y0 = WX = Y - B_c, Y = Y0 + B_c
172
+
173
+ Batch statistics:
174
+ mean_Y = Y.mean()
175
+ = Y0.mean() + B_c
176
+ var_Y = (Y - mean_Y)^2.mean()
177
+ = (Y0 - Y0.mean())^2.mean()
178
+ BN (r: bn.weight, beta: bn.bias):
179
+ Z = r * (Y - mean_Y) / sqrt(var_Y + eps) + beta
180
+ = r * (Y0 - Y0.mean()) / sqrt(var_Y + eps) + beta
181
+
182
+ Fused Conv BN training (std_Y = sqrt(var_Y + eps)):
183
+ Z = (r * W / std_Y) * X + r * (B_c - mean_Y) / std_Y + beta
184
+ = (r * W / std_Y) * X - r * Y0.mean() / std_Y + beta
185
+
186
+ Fused Conv BN inference (running_std = sqrt(running_var + eps)):
187
+ Z = (r * W / running_std) * X - r * (running_mean - B_c) / running_std + beta
188
+
189
+ QAT with fused conv bn:
190
+ Z_train = fake_quant(r * W / running_std) * X * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
191
+ = conv(X, fake_quant(r * W / running_std)) * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
192
+ Z_inference = conv(X, fake_quant(r * W / running_std)) - r * (running_mean - B_c) / running_std + beta
193
+ """
194
+
195
+ assert self.bn.running_var is not None
196
+ assert self.bn.running_mean is not None
197
+
198
+ # using zero bias here since the bias for original conv
199
+ # will be added later
200
+ zero_bias = torch.zeros(
201
+ self.out_channels, device=self.weight.device, dtype=input.dtype
202
+ )
203
+
204
+ weight_shape = [1] * len(self.weight.shape)
205
+ weight_shape[0] = -1
206
+ bias_shape = [1] * len(self.weight.shape)
207
+ bias_shape[1] = -1
208
+
209
+ if self.bn.training:
210
+ # needed to compute batch mean/std
211
+ conv_out = self._conv_forward(input, self.weight, zero_bias)
212
+ # update bn statistics
213
+ with torch.no_grad():
214
+ conv_out_bias = (
215
+ conv_out
216
+ if self.bias is None
217
+ else conv_out + self.bias.reshape(bias_shape)
218
+ )
219
+ self.bn(conv_out_bias)
220
+
221
+ # fused conv + bn without bias using bn running statistics
222
+ running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
223
+ scale_factor = self.bn.weight / running_std
224
+ scaled_weight = self.weight_fake_quant(
225
+ self.weight * scale_factor.reshape(weight_shape)
226
+ )
227
+ # fused conv without bias for inference: (r * W / running_std) * X
228
+ conv_bn = self._conv_forward(input, scaled_weight, zero_bias)
229
+
230
+ if self.bn.training:
231
+ avg_dims = [0] + list(range(2, len(self.weight.shape)))
232
+ batch_mean = conv_out.mean(avg_dims) # type: ignore[possibly-undefined]
233
+ batch_var = torch.square(conv_out - batch_mean.reshape(bias_shape)).mean(
234
+ avg_dims
235
+ )
236
+ batch_std = torch.sqrt(batch_var + self.bn.eps)
237
+
238
+ # scale to use batch std in training mode
239
+ # conv(X, r * W / std_Y) = conv(X, r * W / running_std) * (running_std / std_Y)
240
+ unscale_factor = running_std / batch_std
241
+ conv_bn *= unscale_factor.reshape(bias_shape)
242
+
243
+ fused_mean = batch_mean
244
+ fused_std = batch_std
245
+ else:
246
+ fused_mean = self.bn.running_mean - (
247
+ self.bias if self.bias is not None else 0
248
+ )
249
+ fused_std = running_std
250
+
251
+ # fused bias = beta - r * mean / std
252
+ fused_bias = self.bn.bias - self.bn.weight * fused_mean / fused_std
253
+ conv_bn += fused_bias.reshape(bias_shape)
254
+
255
+ # HACK to let conv bias participate in loss to avoid DDP error (parameters
256
+ # were not used in producing loss)
257
+ if self.bias is not None:
258
+ conv_bn += (self.bias - self.bias).reshape(bias_shape)
259
+
260
+ return conv_bn
261
+
262
+ def extra_repr(self):
263
+ # TODO(jerryzh): extend
264
+ return super().extra_repr()
265
+
266
+ def forward(self, input):
267
+ return self._forward(input)
268
+
269
+ def train(self, mode=True):
270
+ """
271
+ Batchnorm's training behavior is using the self.training flag. Prevent
272
+ changing it if BN is frozen. This makes sure that calling `model.train()`
273
+ on a model with a frozen BN will behave properly.
274
+ """
275
+ self.training = mode
276
+ if not self.freeze_bn:
277
+ for module in self.children():
278
+ module.train(mode)
279
+ return self
280
+
281
+ # ===== Serialization version history =====
282
+ #
283
+ # Version 1/None
284
+ # self
285
+ # |--- weight : Tensor
286
+ # |--- bias : Tensor
287
+ # |--- gamma : Tensor
288
+ # |--- beta : Tensor
289
+ # |--- running_mean : Tensor
290
+ # |--- running_var : Tensor
291
+ # |--- num_batches_tracked : Tensor
292
+ #
293
+ # Version 2
294
+ # self
295
+ # |--- weight : Tensor
296
+ # |--- bias : Tensor
297
+ # |--- bn : Module
298
+ # |--- weight : Tensor (moved from v1.self.gamma)
299
+ # |--- bias : Tensor (moved from v1.self.beta)
300
+ # |--- running_mean : Tensor (moved from v1.self.running_mean)
301
+ # |--- running_var : Tensor (moved from v1.self.running_var)
302
+ # |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
303
+ def _load_from_state_dict(
304
+ self,
305
+ state_dict,
306
+ prefix,
307
+ local_metadata,
308
+ strict,
309
+ missing_keys,
310
+ unexpected_keys,
311
+ error_msgs,
312
+ ):
313
+ version = local_metadata.get("version", None)
314
+ if version is None or version == 1:
315
+ # BN related parameters and buffers were moved into the BN module for v2
316
+ v2_to_v1_names = {
317
+ "bn.weight": "gamma",
318
+ "bn.bias": "beta",
319
+ "bn.running_mean": "running_mean",
320
+ "bn.running_var": "running_var",
321
+ "bn.num_batches_tracked": "num_batches_tracked",
322
+ }
323
+ for v2_name, v1_name in v2_to_v1_names.items():
324
+ if prefix + v1_name in state_dict:
325
+ state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
326
+ state_dict.pop(prefix + v1_name)
327
+ elif prefix + v2_name in state_dict:
328
+ # there was a brief period where forward compatibility
329
+ # for this module was broken (between
330
+ # https://github.com/pytorch/pytorch/pull/38478
331
+ # and https://github.com/pytorch/pytorch/pull/38820)
332
+ # and modules emitted the v2 state_dict format while
333
+ # specifying that version == 1. This patches the forward
334
+ # compatibility issue by allowing the v2 style entries to
335
+ # be used.
336
+ pass
337
+ elif strict:
338
+ missing_keys.append(prefix + v2_name)
339
+
340
+ super()._load_from_state_dict(
341
+ state_dict,
342
+ prefix,
343
+ local_metadata,
344
+ strict,
345
+ missing_keys,
346
+ unexpected_keys,
347
+ error_msgs,
348
+ )
349
+
350
+ @classmethod
351
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
352
+ r"""Create a qat module from a float module or qparams_dict
353
+
354
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
355
+ or directly from user
356
+ """
357
+ # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
358
+ # has no __name__ (code is fine though)
359
+ assert type(mod) == cls._FLOAT_MODULE, (
360
+ "qat."
361
+ + cls.__name__
362
+ + ".from_float only works for "
363
+ + cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
364
+ )
365
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
366
+ assert mod.qconfig, "Input float module must have a valid qconfig"
367
+ qconfig = mod.qconfig
368
+ conv, bn = mod[0], mod[1]
369
+ qat_convbn = cls(
370
+ conv.in_channels,
371
+ conv.out_channels,
372
+ conv.kernel_size,
373
+ conv.stride,
374
+ conv.padding,
375
+ conv.dilation,
376
+ conv.groups,
377
+ conv.bias is not None,
378
+ conv.padding_mode,
379
+ bn.eps,
380
+ bn.momentum,
381
+ False,
382
+ qconfig,
383
+ )
384
+ qat_convbn.weight = conv.weight
385
+ qat_convbn.bias = conv.bias
386
+ qat_convbn.bn.weight = bn.weight
387
+ qat_convbn.bn.bias = bn.bias
388
+ qat_convbn.bn.running_mean = bn.running_mean
389
+ qat_convbn.bn.running_var = bn.running_var
390
+ # mypy error: Cannot determine type of 'num_batches_tracked'
391
+ qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked # type: ignore[has-type]
392
+ return qat_convbn
393
+
394
+ def to_float(self):
395
+ cls = type(self)
396
+ conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined]
397
+ self.in_channels,
398
+ self.out_channels,
399
+ self.kernel_size,
400
+ self.stride,
401
+ self.padding,
402
+ self.dilation,
403
+ self.groups,
404
+ self.bias is not None,
405
+ self.padding_mode,
406
+ )
407
+ conv.weight = torch.nn.Parameter(self.weight.detach())
408
+ if self.bias is not None:
409
+ conv.bias = torch.nn.Parameter(self.bias.detach())
410
+
411
+ if cls._FLOAT_BN_MODULE: # type: ignore[attr-defined]
412
+ # fuse bn into conv
413
+ assert self.bn.running_var is not None and self.bn.running_mean is not None
414
+ conv.weight, conv.bias = fuse_conv_bn_weights(
415
+ conv.weight,
416
+ conv.bias,
417
+ self.bn.running_mean,
418
+ self.bn.running_var,
419
+ self.bn.eps,
420
+ self.bn.weight,
421
+ self.bn.bias,
422
+ )
423
+
424
+ if cls._FLOAT_RELU_MODULE: # type: ignore[attr-defined]
425
+ modules = []
426
+ modules.append(conv)
427
+ relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
428
+ modules.append(relu)
429
+ conv_relu = cls._FUSED_FLOAT_MODULE(*modules) # type: ignore[attr-defined]
430
+ conv_relu.train(self.training)
431
+ return conv_relu
432
+ else:
433
+ conv.train(self.training)
434
+ return conv
435
+
436
+
437
+ class ConvBn1d(_ConvBnNd, nn.Conv1d):
438
+ r"""
439
+ A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
440
+ attached with FakeQuantize modules for weight,
441
+ used in quantization aware training.
442
+
443
+ We combined the interface of :class:`torch.nn.Conv1d` and
444
+ :class:`torch.nn.BatchNorm1d`.
445
+
446
+ Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
447
+ to default.
448
+
449
+ Attributes:
450
+ freeze_bn:
451
+ weight_fake_quant: fake quant module for weight
452
+
453
+ """
454
+ _FLOAT_BN_MODULE = nn.BatchNorm1d
455
+ _FLOAT_RELU_MODULE: None = None
456
+ _FLOAT_MODULE = nni.ConvBn1d
457
+ _FLOAT_CONV_MODULE = nn.Conv1d
458
+
459
+ def __init__(
460
+ self,
461
+ # Conv1d args
462
+ in_channels,
463
+ out_channels,
464
+ kernel_size,
465
+ stride=1,
466
+ padding=0,
467
+ dilation=1,
468
+ groups=1,
469
+ bias=None,
470
+ padding_mode="zeros",
471
+ # BatchNorm1d args
472
+ # num_features: out_channels
473
+ eps=1e-05,
474
+ momentum=0.1,
475
+ # affine: True
476
+ # track_running_stats: True
477
+ # Args for this module
478
+ freeze_bn=False,
479
+ qconfig=None,
480
+ ):
481
+ kernel_size = _single(kernel_size)
482
+ stride = _single(stride)
483
+ padding = _single(padding)
484
+ dilation = _single(dilation)
485
+ _ConvBnNd.__init__(
486
+ self,
487
+ in_channels,
488
+ out_channels,
489
+ kernel_size,
490
+ stride,
491
+ padding,
492
+ dilation,
493
+ False,
494
+ _single(0),
495
+ groups,
496
+ bias,
497
+ padding_mode,
498
+ eps,
499
+ momentum,
500
+ freeze_bn,
501
+ qconfig,
502
+ dim=1,
503
+ )
504
+
505
+
506
+ class ConvBnReLU1d(ConvBn1d):
507
+ r"""
508
+ A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
509
+ attached with FakeQuantize modules for weight,
510
+ used in quantization aware training.
511
+
512
+ We combined the interface of :class:`torch.nn.Conv1d` and
513
+ :class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
514
+
515
+ Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
516
+ default.
517
+
518
+ Attributes:
519
+ weight_fake_quant: fake quant module for weight
520
+
521
+ """
522
+ # base class defines _FLOAT_MODULE as "ConvBn1d"
523
+ _FLOAT_MODULE = nni.ConvBnReLU1d # type: ignore[assignment]
524
+ _FLOAT_CONV_MODULE = nn.Conv1d
525
+ _FLOAT_BN_MODULE = nn.BatchNorm1d
526
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
527
+ # module class after fusing bn into conv
528
+ _FUSED_FLOAT_MODULE = nni.ConvReLU1d
529
+
530
+ def __init__(
531
+ self,
532
+ # Conv1d args
533
+ in_channels,
534
+ out_channels,
535
+ kernel_size,
536
+ stride=1,
537
+ padding=0,
538
+ dilation=1,
539
+ groups=1,
540
+ bias=None,
541
+ padding_mode="zeros",
542
+ # BatchNorm1d args
543
+ # num_features: out_channels
544
+ eps=1e-05,
545
+ momentum=0.1,
546
+ # affine: True
547
+ # track_running_stats: True
548
+ # Args for this module
549
+ freeze_bn=False,
550
+ qconfig=None,
551
+ ):
552
+ super().__init__(
553
+ in_channels,
554
+ out_channels,
555
+ kernel_size,
556
+ stride,
557
+ padding,
558
+ dilation,
559
+ groups,
560
+ bias,
561
+ padding_mode,
562
+ eps,
563
+ momentum,
564
+ freeze_bn,
565
+ qconfig,
566
+ )
567
+
568
+ def forward(self, input):
569
+ return F.relu(ConvBn1d._forward(self, input))
570
+
571
+ @classmethod
572
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
573
+ return super().from_float(mod, use_precomputed_fake_quant)
574
+
575
+
576
+ class ConvReLU1d(nnqat.Conv1d, nni._FusedModule):
577
+ r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
578
+ FakeQuantize modules for weight for
579
+ quantization aware training.
580
+
581
+ We combined the interface of :class:`~torch.nn.Conv1d` and
582
+ :class:`~torch.nn.BatchNorm1d`.
583
+
584
+ Attributes:
585
+ weight_fake_quant: fake quant module for weight
586
+
587
+ """
588
+ _FLOAT_MODULE = nni.ConvReLU1d # type: ignore[assignment]
589
+ _FLOAT_CONV_MODULE = nn.Conv1d
590
+ _FLOAT_BN_MODULE: None = None
591
+ _FLOAT_RELU_MODULE = nn.ReLU
592
+
593
+ def __init__(
594
+ self,
595
+ in_channels,
596
+ out_channels,
597
+ kernel_size,
598
+ stride=1,
599
+ padding=0,
600
+ dilation=1,
601
+ groups=1,
602
+ bias=True,
603
+ padding_mode="zeros",
604
+ qconfig=None,
605
+ ):
606
+ super().__init__(
607
+ in_channels,
608
+ out_channels,
609
+ kernel_size,
610
+ stride=stride,
611
+ padding=padding,
612
+ dilation=dilation,
613
+ groups=groups,
614
+ bias=bias,
615
+ padding_mode=padding_mode,
616
+ qconfig=qconfig,
617
+ )
618
+ assert qconfig, "qconfig must be provided for QAT module"
619
+ self.qconfig = qconfig
620
+ self.weight_fake_quant = self.qconfig.weight()
621
+
622
+ def forward(self, input):
623
+ return F.relu(
624
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
625
+ )
626
+
627
+ @classmethod
628
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
629
+ return super().from_float(
630
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
631
+ )
632
+
633
+
634
+ class ConvBn2d(_ConvBnNd, nn.Conv2d):
635
+ r"""
636
+ A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
637
+ attached with FakeQuantize modules for weight,
638
+ used in quantization aware training.
639
+
640
+ We combined the interface of :class:`torch.nn.Conv2d` and
641
+ :class:`torch.nn.BatchNorm2d`.
642
+
643
+ Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
644
+ to default.
645
+
646
+ Attributes:
647
+ freeze_bn:
648
+ weight_fake_quant: fake quant module for weight
649
+
650
+ """
651
+ _FLOAT_MODULE = nni.ConvBn2d
652
+ _FLOAT_CONV_MODULE = nn.Conv2d
653
+ _FLOAT_BN_MODULE = nn.BatchNorm2d
654
+ _FLOAT_RELU_MODULE: None = None
655
+
656
+ def __init__(
657
+ self,
658
+ # ConvNd args
659
+ in_channels,
660
+ out_channels,
661
+ kernel_size,
662
+ stride=1,
663
+ padding=0,
664
+ dilation=1,
665
+ groups=1,
666
+ bias=None,
667
+ padding_mode="zeros",
668
+ # BatchNorm2d args
669
+ # num_features: out_channels
670
+ eps=1e-05,
671
+ momentum=0.1,
672
+ # affine: True
673
+ # track_running_stats: True
674
+ # Args for this module
675
+ freeze_bn=False,
676
+ qconfig=None,
677
+ ):
678
+ kernel_size = _pair(kernel_size)
679
+ stride = _pair(stride)
680
+ padding = _pair(padding)
681
+ dilation = _pair(dilation)
682
+ _ConvBnNd.__init__(
683
+ self,
684
+ in_channels,
685
+ out_channels,
686
+ kernel_size,
687
+ stride,
688
+ padding,
689
+ dilation,
690
+ False,
691
+ _pair(0),
692
+ groups,
693
+ bias,
694
+ padding_mode,
695
+ eps,
696
+ momentum,
697
+ freeze_bn,
698
+ qconfig,
699
+ dim=2,
700
+ )
701
+
702
+
703
+ class ConvBnReLU2d(ConvBn2d):
704
+ r"""
705
+ A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
706
+ attached with FakeQuantize modules for weight,
707
+ used in quantization aware training.
708
+
709
+ We combined the interface of :class:`torch.nn.Conv2d` and
710
+ :class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
711
+
712
+ Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
713
+ default.
714
+
715
+ Attributes:
716
+ weight_fake_quant: fake quant module for weight
717
+
718
+ """
719
+ # base class defines _FLOAT_MODULE as "ConvBn2d"
720
+ _FLOAT_MODULE = nni.ConvBnReLU2d # type: ignore[assignment]
721
+ _FLOAT_CONV_MODULE = nn.Conv2d
722
+ _FLOAT_BN_MODULE = nn.BatchNorm2d
723
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
724
+ # module class after fusing bn into conv
725
+ _FUSED_FLOAT_MODULE = nni.ConvReLU2d
726
+
727
+ def __init__(
728
+ self,
729
+ # Conv2d args
730
+ in_channels,
731
+ out_channels,
732
+ kernel_size,
733
+ stride=1,
734
+ padding=0,
735
+ dilation=1,
736
+ groups=1,
737
+ bias=None,
738
+ padding_mode="zeros",
739
+ # BatchNorm2d args
740
+ # num_features: out_channels
741
+ eps=1e-05,
742
+ momentum=0.1,
743
+ # affine: True
744
+ # track_running_stats: True
745
+ # Args for this module
746
+ freeze_bn=False,
747
+ qconfig=None,
748
+ ):
749
+ super().__init__(
750
+ in_channels,
751
+ out_channels,
752
+ kernel_size,
753
+ stride,
754
+ padding,
755
+ dilation,
756
+ groups,
757
+ bias,
758
+ padding_mode,
759
+ eps,
760
+ momentum,
761
+ freeze_bn,
762
+ qconfig,
763
+ )
764
+
765
+ def forward(self, input):
766
+ return F.relu(ConvBn2d._forward(self, input))
767
+
768
+ @classmethod
769
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
770
+ return super().from_float(mod, use_precomputed_fake_quant)
771
+
772
+
773
+ class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
774
+ r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
775
+ FakeQuantize modules for weight for
776
+ quantization aware training.
777
+
778
+ We combined the interface of :class:`~torch.nn.Conv2d` and
779
+ :class:`~torch.nn.BatchNorm2d`.
780
+
781
+ Attributes:
782
+ weight_fake_quant: fake quant module for weight
783
+
784
+ """
785
+ _FLOAT_MODULE = nni.ConvReLU2d # type: ignore[assignment]
786
+ _FLOAT_CONV_MODULE = nn.Conv2d
787
+ _FLOAT_BN_MODULE: None = None
788
+ _FLOAT_RELU_MODULE = nn.ReLU
789
+
790
+ def __init__(
791
+ self,
792
+ in_channels,
793
+ out_channels,
794
+ kernel_size,
795
+ stride=1,
796
+ padding=0,
797
+ dilation=1,
798
+ groups=1,
799
+ bias=True,
800
+ padding_mode="zeros",
801
+ qconfig=None,
802
+ ):
803
+ super().__init__(
804
+ in_channels,
805
+ out_channels,
806
+ kernel_size,
807
+ stride=stride,
808
+ padding=padding,
809
+ dilation=dilation,
810
+ groups=groups,
811
+ bias=bias,
812
+ padding_mode=padding_mode,
813
+ qconfig=qconfig,
814
+ )
815
+ assert qconfig, "qconfig must be provided for QAT module"
816
+ self.qconfig = qconfig
817
+ self.weight_fake_quant = self.qconfig.weight()
818
+
819
+ def forward(self, input):
820
+ return F.relu(
821
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
822
+ )
823
+
824
+ @classmethod
825
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
826
+ return super().from_float(
827
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
828
+ )
829
+
830
+
831
+ class ConvBn3d(_ConvBnNd, nn.Conv3d):
832
+ r"""
833
+ A ConvBn3d module is a module fused from Conv3d and BatchNorm3d,
834
+ attached with FakeQuantize modules for weight,
835
+ used in quantization aware training.
836
+
837
+ We combined the interface of :class:`torch.nn.Conv3d` and
838
+ :class:`torch.nn.BatchNorm3d`.
839
+
840
+ Similar to :class:`torch.nn.Conv3d`, with FakeQuantize modules initialized
841
+ to default.
842
+
843
+ Attributes:
844
+ freeze_bn:
845
+ weight_fake_quant: fake quant module for weight
846
+
847
+ """
848
+ _FLOAT_MODULE = nni.ConvBn3d
849
+ _FLOAT_CONV_MODULE = nn.Conv3d
850
+ _FLOAT_BN_MODULE = nn.BatchNorm3d
851
+ _FLOAT_RELU_MODULE: None = None
852
+
853
+ def __init__(
854
+ self,
855
+ # ConvNd args
856
+ in_channels,
857
+ out_channels,
858
+ kernel_size,
859
+ stride=1,
860
+ padding=0,
861
+ dilation=1,
862
+ groups=1,
863
+ bias=None,
864
+ padding_mode="zeros",
865
+ # BatchNorm3d args
866
+ # num_features: out_channels
867
+ eps=1e-05,
868
+ momentum=0.1,
869
+ # affine: True
870
+ # track_running_stats: True
871
+ # Args for this module
872
+ freeze_bn=False,
873
+ qconfig=None,
874
+ ):
875
+ kernel_size = _triple(kernel_size)
876
+ stride = _triple(stride)
877
+ padding = _triple(padding)
878
+ dilation = _triple(dilation)
879
+ _ConvBnNd.__init__(
880
+ self,
881
+ in_channels,
882
+ out_channels,
883
+ kernel_size,
884
+ stride,
885
+ padding,
886
+ dilation,
887
+ False,
888
+ _triple(0),
889
+ groups,
890
+ bias,
891
+ padding_mode,
892
+ eps,
893
+ momentum,
894
+ freeze_bn,
895
+ qconfig,
896
+ dim=3,
897
+ )
898
+
899
+
900
+ class ConvBnReLU3d(ConvBn3d):
901
+ r"""
902
+ A ConvBnReLU3d module is a module fused from Conv3d, BatchNorm3d and ReLU,
903
+ attached with FakeQuantize modules for weight,
904
+ used in quantization aware training.
905
+
906
+ We combined the interface of :class:`torch.nn.Conv3d` and
907
+ :class:`torch.nn.BatchNorm3d` and :class:`torch.nn.ReLU`.
908
+
909
+ Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
910
+ default.
911
+
912
+ Attributes:
913
+ weight_fake_quant: fake quant module for weight
914
+
915
+ """
916
+ _FLOAT_MODULE = nni.ConvBnReLU3d # type: ignore[assignment]
917
+ _FLOAT_CONV_MODULE = nn.Conv3d
918
+ _FLOAT_BN_MODULE = nn.BatchNorm3d
919
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
920
+ # module class after fusing bn into conv
921
+ _FUSED_FLOAT_MODULE = nni.ConvReLU3d
922
+
923
+ def __init__(
924
+ self,
925
+ # Conv3d args
926
+ in_channels,
927
+ out_channels,
928
+ kernel_size,
929
+ stride=1,
930
+ padding=0,
931
+ dilation=1,
932
+ groups=1,
933
+ bias=None,
934
+ padding_mode="zeros",
935
+ # BatchNorm3d args
936
+ # num_features: out_channels
937
+ eps=1e-05,
938
+ momentum=0.1,
939
+ # affine: True
940
+ # track_running_stats: True
941
+ # Args for this module
942
+ freeze_bn=False,
943
+ qconfig=None,
944
+ ):
945
+ super().__init__(
946
+ in_channels,
947
+ out_channels,
948
+ kernel_size,
949
+ stride,
950
+ padding,
951
+ dilation,
952
+ groups,
953
+ bias,
954
+ padding_mode,
955
+ eps,
956
+ momentum,
957
+ freeze_bn,
958
+ qconfig,
959
+ )
960
+
961
+ def forward(self, input):
962
+ return F.relu(ConvBn3d._forward(self, input))
963
+
964
+ @classmethod
965
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
966
+ return super().from_float(
967
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
968
+ )
969
+
970
+
971
+ class ConvReLU3d(nnqat.Conv3d, nni._FusedModule):
972
+ r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with
973
+ FakeQuantize modules for weight for
974
+ quantization aware training.
975
+
976
+ We combined the interface of :class:`~torch.nn.Conv3d` and
977
+ :class:`~torch.nn.BatchNorm3d`.
978
+
979
+ Attributes:
980
+ weight_fake_quant: fake quant module for weight
981
+
982
+ """
983
+ _FLOAT_MODULE = nni.ConvReLU3d # type: ignore[assignment]
984
+ _FLOAT_CONV_MODULE = nn.Conv3d
985
+ _FLOAT_BN_MODULE: None = None
986
+ _FLOAT_RELU_MODULE = nn.ReLU
987
+
988
+ def __init__(
989
+ self,
990
+ in_channels,
991
+ out_channels,
992
+ kernel_size,
993
+ stride=1,
994
+ padding=0,
995
+ dilation=1,
996
+ groups=1,
997
+ bias=True,
998
+ padding_mode="zeros",
999
+ qconfig=None,
1000
+ ):
1001
+ super().__init__(
1002
+ in_channels,
1003
+ out_channels,
1004
+ kernel_size,
1005
+ stride=stride,
1006
+ padding=padding,
1007
+ dilation=dilation,
1008
+ groups=groups,
1009
+ bias=bias,
1010
+ padding_mode=padding_mode,
1011
+ qconfig=qconfig,
1012
+ )
1013
+ assert qconfig, "qconfig must be provided for QAT module"
1014
+ self.qconfig = qconfig
1015
+ self.weight_fake_quant = self.qconfig.weight()
1016
+
1017
+ def forward(self, input):
1018
+ return F.relu(
1019
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
1020
+ )
1021
+
1022
+ @classmethod
1023
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
1024
+ return super().from_float(
1025
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
1026
+ )
1027
+
1028
+
1029
+ def update_bn_stats(mod):
1030
+ if type(mod) in {
1031
+ ConvBnReLU1d,
1032
+ ConvBnReLU2d,
1033
+ ConvBnReLU3d,
1034
+ ConvBn1d,
1035
+ ConvBn2d,
1036
+ ConvBn3d,
1037
+ }:
1038
+ mod.update_bn_stats()
1039
+
1040
+
1041
+ def freeze_bn_stats(mod):
1042
+ if type(mod) in {
1043
+ ConvBnReLU1d,
1044
+ ConvBnReLU2d,
1045
+ ConvBnReLU3d,
1046
+ ConvBn1d,
1047
+ ConvBn2d,
1048
+ ConvBn3d,
1049
+ }:
1050
+ mod.freeze_bn_stats()
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.intrinsic as nni
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch.nn import init
7
+ from torch.nn.parameter import Parameter
8
+ from torch.nn.utils.fusion import fuse_linear_bn_weights
9
+
10
+
11
+ __all__ = [
12
+ "LinearBn1d",
13
+ ]
14
+
15
+
16
+ class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
17
+ r"""
18
+ A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached
19
+ with FakeQuantize modules for weight, used in quantization aware training.
20
+
21
+ We combined the interface of :class:`torch.nn.Linear` and
22
+ :class:torch.nn.BatchNorm1d`.
23
+
24
+ Similar to :class:`torch.nn.Linear`, with FakeQuantize modules initialized
25
+ to default.
26
+
27
+ Attributes:
28
+ freeze_bn:
29
+ weight_fake_quant: fake quant module for weight
30
+
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ # Linear args
36
+ in_features,
37
+ out_features,
38
+ bias=True,
39
+ # BatchNorm1d args
40
+ # num_features: out_features
41
+ eps=1e-05,
42
+ momentum=0.1,
43
+ # affine: True
44
+ # track_running_stats: True
45
+ # Args for this module
46
+ freeze_bn=False,
47
+ qconfig=None,
48
+ ):
49
+ nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
50
+ assert qconfig, "qconfig must be provided for QAT module"
51
+ self.qconfig = qconfig
52
+ self.freeze_bn = freeze_bn if self.training else True
53
+ self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)
54
+ self.weight_fake_quant = self.qconfig.weight()
55
+ if bias:
56
+ self.bias = Parameter(torch.empty(out_features))
57
+ else:
58
+ self.register_parameter("bias", None)
59
+ self.reset_bn_parameters()
60
+
61
+ # this needs to be called after reset_bn_parameters,
62
+ # as they modify the same state
63
+ if self.training:
64
+ if freeze_bn:
65
+ self.freeze_bn_stats()
66
+ else:
67
+ self.update_bn_stats()
68
+ else:
69
+ self.freeze_bn_stats()
70
+
71
+ def reset_running_stats(self):
72
+ self.bn.reset_running_stats()
73
+
74
+ def reset_bn_parameters(self):
75
+ self.bn.reset_running_stats()
76
+ init.uniform_(self.bn.weight)
77
+ init.zeros_(self.bn.bias)
78
+
79
+ def reset_parameters(self):
80
+ super().reset_parameters()
81
+
82
+ def update_bn_stats(self):
83
+ self.freeze_bn = False
84
+ self.bn.training = True
85
+ return self
86
+
87
+ def freeze_bn_stats(self):
88
+ self.freeze_bn = True
89
+ self.bn.training = False
90
+ return self
91
+
92
+ def forward(self, input):
93
+ assert self.bn.running_var is not None
94
+
95
+ # Scale the linear weights by BN's running statistics to reduce
96
+ # weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18
97
+ # for motivation.
98
+ #
99
+ # Instead of
100
+ #
101
+ # x1 = F.linear(x0, fq(w), b)
102
+ # x2 = self.bn(x1)
103
+ #
104
+ # We have
105
+ #
106
+ # # scale the weight by previous batch's running statistics
107
+ # scale_factor = bn.w / bn.running_std_from_prev_batch
108
+ # # do the linear transformation without bias
109
+ # x1_scaled = F.linear(x0, fq(w * scale_factor), 0)
110
+ # # reverse the scaling and add original bias
111
+ # x1_orig = x1_scaled / scale_factor + b
112
+ # x2 = self.bn(x1_orig)
113
+
114
+ running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
115
+ scale_factor = self.bn.weight / running_std
116
+ weight_shape = [1] * len(self.weight.shape)
117
+ weight_shape[0] = -1
118
+ bias_shape = [1] * len(self.weight.shape)
119
+ bias_shape[1] = -1
120
+ scaled_weight = self.weight_fake_quant(
121
+ self.weight * scale_factor.reshape(weight_shape)
122
+ )
123
+ if self.bias is not None:
124
+ zero_bias = torch.zeros_like(self.bias)
125
+ else:
126
+ zero_bias = torch.zeros(self.out_features, device=scaled_weight.device)
127
+ linear_out = F.linear(input, scaled_weight, zero_bias)
128
+ linear_out_orig = linear_out / scale_factor.reshape(bias_shape)
129
+ if self.bias is not None:
130
+ linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape)
131
+ bn_out = self.bn(linear_out_orig)
132
+ return bn_out
133
+
134
+ def train(self, mode=True):
135
+ """
136
+ Batchnorm's training behavior is using the self.training flag. Prevent
137
+ changing it if BN is frozen. This makes sure that calling `model.train()`
138
+ on a model with a frozen BN will behave properly.
139
+ """
140
+ self.training = mode
141
+ if not self.freeze_bn:
142
+ for module in self.children():
143
+ module.train(mode)
144
+ return self
145
+
146
+ @classmethod
147
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
148
+ r"""Create a qat module from a float module or qparams_dict
149
+
150
+ Args: `mod' a float module, either produced by torch.ao.quantization
151
+ utilities or directly from user
152
+ """
153
+ assert type(mod) == nni.LinearBn1d, (
154
+ "qat."
155
+ + cls.__name__
156
+ + ".from_float only works for "
157
+ + nni.LinearBn1d.__name__
158
+ )
159
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
160
+ assert mod.qconfig, "Input float module must have a valid config"
161
+ qconfig = mod.qconfig
162
+ linear, bn = mod[0], mod[1]
163
+ qat_linearbn = cls(
164
+ linear.in_features,
165
+ linear.out_features,
166
+ linear.bias is not None,
167
+ bn.eps,
168
+ bn.momentum,
169
+ False,
170
+ qconfig,
171
+ )
172
+ qat_linearbn.weight = linear.weight
173
+ qat_linearbn.bias = linear.bias
174
+ qat_linearbn.bn.weight = bn.weight
175
+ qat_linearbn.bn.bias = bn.bias
176
+ qat_linearbn.bn.running_mean = bn.running_mean
177
+ qat_linearbn.bn.running_var = bn.running_var
178
+ qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked
179
+ return qat_linearbn
180
+
181
+ def to_float(self):
182
+ linear = torch.nn.Linear(self.in_features, self.out_features)
183
+ assert self.bn.running_var is not None and self.bn.running_mean is not None
184
+ linear.weight, linear.bias = fuse_linear_bn_weights(
185
+ self.weight,
186
+ self.bias,
187
+ self.bn.running_mean,
188
+ self.bn.running_var,
189
+ self.bn.eps,
190
+ self.bn.weight,
191
+ self.bn.bias,
192
+ )
193
+ return linear
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.intrinsic as nni
4
+ import torch.ao.nn.qat as nnqat
5
+ import torch.nn.functional as F
6
+
7
+
8
+ class LinearReLU(nnqat.Linear, nni._FusedModule):
9
+ r"""
10
+ A LinearReLU module fused from Linear and ReLU modules, attached with
11
+ FakeQuantize modules for weight, used in
12
+ quantization aware training.
13
+
14
+ We adopt the same interface as :class:`torch.nn.Linear`.
15
+
16
+ Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
17
+ default.
18
+
19
+ Attributes:
20
+ weight: fake quant module for weight
21
+
22
+ Examples::
23
+
24
+ >>> # xdoctest: +SKIP
25
+ >>> m = nn.qat.LinearReLU(20, 30)
26
+ >>> input = torch.randn(128, 20)
27
+ >>> output = m(input)
28
+ >>> print(output.size())
29
+ torch.Size([128, 30])
30
+ """
31
+ _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
32
+
33
+ def __init__(self, in_features, out_features, bias=True, qconfig=None):
34
+ super().__init__(in_features, out_features, bias, qconfig)
35
+
36
+ def forward(self, input):
37
+ return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
38
+
39
+ @classmethod
40
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
41
+ return super().from_float(mod, use_precomputed_fake_quant)
42
+
43
+ def to_float(self):
44
+ linear = torch.nn.Linear(
45
+ self.in_features, self.out_features, self.bias is not None
46
+ )
47
+ linear.weight = torch.nn.Parameter(self.weight.detach())
48
+ if self.bias is not None:
49
+ linear.bias = torch.nn.Parameter(self.bias.detach())
50
+ relu = torch.nn.ReLU()
51
+ return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .modules import * # noqa: F403
2
+
3
+
4
+ __all__ = [
5
+ "BNReLU2d",
6
+ "BNReLU3d",
7
+ "ConvReLU1d",
8
+ "ConvReLU2d",
9
+ "ConvReLU3d",
10
+ "LinearReLU",
11
+ "LinearLeakyReLU",
12
+ "LinearTanh",
13
+ "ConvAdd2d",
14
+ "ConvAddReLU2d",
15
+ ]
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (355 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .linear_relu import LinearReLU
2
+
3
+
4
+ __all__ = [
5
+ "LinearReLU",
6
+ ]
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (273 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.intrinsic as nni
4
+ import torch.ao.nn.quantized.dynamic as nnqd
5
+
6
+
7
+ __all__ = ["LinearReLU"]
8
+
9
+
10
+ class LinearReLU(nnqd.Linear):
11
+ r"""
12
+ A LinearReLU module fused from Linear and ReLU modules that can be used
13
+ for dynamic quantization.
14
+ Supports both, FP16 and INT8 quantization.
15
+
16
+ We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.
17
+
18
+ Attributes:
19
+ Same as torch.ao.nn.quantized.dynamic.Linear
20
+
21
+ Examples::
22
+
23
+ >>> # xdoctest: +SKIP
24
+ >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
25
+ >>> input = torch.randn(128, 20)
26
+ >>> output = m(input)
27
+ >>> print(output.size())
28
+ torch.Size([128, 30])
29
+ """
30
+ _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
31
+
32
+ def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
33
+ super().__init__(in_features, out_features, bias, dtype)
34
+
35
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
36
+ if self._packed_params.dtype == torch.qint8:
37
+ # TODO check if we should set reduce_rage = True by default here
38
+ Y = torch.ops.quantized.linear_relu_dynamic(
39
+ x, self._packed_params._packed_params, reduce_range=True
40
+ )
41
+ elif self._packed_params.dtype == torch.float16:
42
+ Y = torch.ops.quantized.linear_relu_dynamic_fp16(
43
+ x, self._packed_params._packed_params
44
+ )
45
+ else:
46
+ raise RuntimeError("Unsupported dtype on dynamic quantized linear relu!")
47
+ return Y.to(x.dtype)
48
+
49
+ def _get_name(self):
50
+ return "DynamicQuantizedLinearReLU"
51
+
52
+ @classmethod
53
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
54
+ return super().from_float(
55
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
56
+ )
57
+
58
+ @classmethod
59
+ def from_reference(cls, ref_qlinear_relu):
60
+ return super().from_reference(ref_qlinear_relu[0])
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (570 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc ADDED
Binary file (3.12 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc ADDED
Binary file (5.84 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch
4
+ import torch.ao.nn.intrinsic
5
+ import torch.ao.nn.intrinsic.qat
6
+ import torch.ao.nn.quantized as nnq
7
+
8
+
9
+ __all__ = ["BNReLU2d", "BNReLU3d"]
10
+
11
+
12
+ class BNReLU2d(nnq.BatchNorm2d):
13
+ r"""
14
+ A BNReLU2d module is a fused module of BatchNorm2d and ReLU
15
+
16
+ We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm2d`.
17
+
18
+ Attributes:
19
+ Same as torch.ao.nn.quantized.BatchNorm2d
20
+
21
+ """
22
+ _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d
23
+
24
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
25
+ super().__init__(
26
+ num_features, eps=eps, momentum=momentum, device=device, dtype=dtype
27
+ )
28
+
29
+ def forward(self, input):
30
+ # Temporarily using len(shape) instead of ndim due to JIT issue
31
+ # https://github.com/pytorch/pytorch/issues/23890
32
+ if len(input.shape) != 4:
33
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
34
+ return torch.ops.quantized.batch_norm2d_relu(
35
+ input,
36
+ self.weight,
37
+ self.bias,
38
+ self.running_mean,
39
+ self.running_var,
40
+ self.eps,
41
+ self.scale,
42
+ self.zero_point,
43
+ )
44
+
45
+ def _get_name(self):
46
+ return "QuantizedBNReLU2d"
47
+
48
+ @classmethod
49
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
50
+ # TODO: Add qat support for BNReLU2d
51
+ return super().from_float(
52
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
53
+ )
54
+
55
+ @classmethod
56
+ def from_reference(cls, bn_relu, output_scale, output_zero_point):
57
+ return super().from_reference(bn_relu[0], output_scale, output_zero_point)
58
+
59
+
60
+ class BNReLU3d(nnq.BatchNorm3d):
61
+ r"""
62
+ A BNReLU3d module is a fused module of BatchNorm3d and ReLU
63
+
64
+ We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm3d`.
65
+
66
+ Attributes:
67
+ Same as torch.ao.nn.quantized.BatchNorm3d
68
+
69
+ """
70
+ _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d
71
+
72
+ def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
73
+ super().__init__(
74
+ num_features, eps=eps, momentum=momentum, device=device, dtype=dtype
75
+ )
76
+
77
+ def forward(self, input):
78
+ # Temporarily using len(shape) instead of ndim due to JIT issue
79
+ # https://github.com/pytorch/pytorch/issues/23890
80
+ if len(input.shape) != 5:
81
+ raise ValueError("Input shape must be `(N, C, D, H, W)`!")
82
+ return torch.ops.quantized.batch_norm3d_relu(
83
+ input,
84
+ self.weight,
85
+ self.bias,
86
+ self.running_mean,
87
+ self.running_var,
88
+ self.eps,
89
+ self.scale,
90
+ self.zero_point,
91
+ )
92
+
93
+ def _get_name(self):
94
+ return "QuantizedBNReLU3d"
95
+
96
+ @classmethod
97
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
98
+ # TODO: Add qat support for BNReLU3d
99
+ return super().from_float(
100
+ mod, use_precomputed_fake_quant=use_precomputed_fake_quant
101
+ )
102
+
103
+ @classmethod
104
+ def from_reference(cls, bn_relu, output_scale, output_zero_point):
105
+ return super().from_reference(bn_relu[0], output_scale, output_zero_point)
videochat2/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.intrinsic as nni
4
+ import torch.ao.nn.quantized as nnq
5
+ from torch.ao.nn.quantized.modules.utils import _quantize_weight
6
+
7
+
8
+ __all__ = [
9
+ "LinearReLU",
10
+ "LinearLeakyReLU",
11
+ "LinearTanh",
12
+ ]
13
+
14
+
15
+ class LinearReLU(nnq.Linear):
16
+ r"""
17
+ A LinearReLU module fused from Linear and ReLU modules
18
+
19
+ We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
20
+
21
+ Attributes:
22
+ Same as torch.ao.nn.quantized.Linear
23
+
24
+ Examples::
25
+
26
+ >>> # xdoctest: +SKIP
27
+ >>> m = nn.intrinsic.LinearReLU(20, 30)
28
+ >>> input = torch.randn(128, 20)
29
+ >>> output = m(input)
30
+ >>> print(output.size())
31
+ torch.Size([128, 30])
32
+ """
33
+ _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
34
+
35
+ def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
36
+ super().__init__(in_features, out_features, bias, dtype)
37
+
38
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
39
+ return torch.ops.quantized.linear_relu(
40
+ x, self._packed_params._packed_params, self.scale, self.zero_point
41
+ )
42
+
43
+ def _get_name(self):
44
+ return "QuantizedLinearReLU"
45
+
46
+ @classmethod
47
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
48
+ return super().from_float(mod, use_precomputed_fake_quant)
49
+
50
+ @classmethod
51
+ def from_reference(cls, ref_linear_relu, output_scale, output_zero_point):
52
+ return super().from_reference(
53
+ ref_linear_relu[0], output_scale, output_zero_point
54
+ )
55
+
56
+
57
+ class LinearLeakyReLU(nnq.Linear):
58
+ r"""
59
+ For onednn backend only
60
+ A LinearLeakyReLU module fused from Linear and LeakyReLU modules
61
+ We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
62
+ Attributes:
63
+ Same as torch.ao.nn.quantized.Linear
64
+ + negative_slope
65
+ Examples::
66
+ >>> # xdoctest: +SKIP
67
+ >>> m = nn.intrinsic.LinearLeakyReLU(20, 30, 0.01)
68
+ >>> input = torch.randn(128, 20)
69
+ >>> output = m(input)
70
+ >>> print(output.size())
71
+ torch.Size([128, 30])
72
+ """
73
+ _FLOAT_MODULE = nni.LinearLeakyReLU # type: ignore[assignment]
74
+
75
+ def __init__(
76
+ self, in_features, out_features, negative_slope, bias=True, dtype=torch.qint8
77
+ ):
78
+ super().__init__(in_features, out_features, bias, dtype)
79
+ self.negative_slope = negative_slope
80
+
81
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
82
+ return torch.ops.quantized.linear_leaky_relu(
83
+ x,
84
+ self._packed_params._packed_params,
85
+ self.scale,
86
+ self.zero_point,
87
+ self.negative_slope,
88
+ )
89
+
90
+ def _get_name(self):
91
+ return "QuantizedLinearLeakyReLU"
92
+
93
+ @classmethod
94
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
95
+ assert (
96
+ type(mod) == nni.LinearLeakyReLU
97
+ ), "Input float module should be LinearLeakyReLU"
98
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
99
+ activation_post_process = mod.activation_post_process
100
+ leaky_relu = mod[1]
101
+ mod = mod[0]
102
+ weight_post_process = mod.qconfig.weight()
103
+ weight_post_process(mod.weight)
104
+ dtype = weight_post_process.dtype
105
+ act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[union-attr,operator]
106
+ assert dtype == torch.qint8, "Weight observer must have dtype torch.qint8"
107
+ qweight = _quantize_weight(mod.weight.float(), weight_post_process)
108
+ qlinear_leaky_relu = cls(
109
+ mod.in_features, mod.out_features, leaky_relu.negative_slope, dtype=dtype
110
+ )
111
+ qlinear_leaky_relu.set_weight_bias(qweight, mod.bias)
112
+ qlinear_leaky_relu.scale = float(act_scale)
113
+ qlinear_leaky_relu.zero_point = int(act_zp)
114
+ return qlinear_leaky_relu
115
+
116
+ @classmethod
117
+ def from_reference(cls, ref_mod, output_scale, output_zero_point):
118
+ linear = ref_mod[0]
119
+ leaky_relu = ref_mod[1]
120
+ qlinear_leaky_relu = cls(
121
+ linear.in_features, linear.out_features, leaky_relu.negative_slope
122
+ )
123
+ qweight = linear.get_quantized_weight()
124
+ qlinear_leaky_relu.set_weight_bias(qweight, linear.bias)
125
+ qlinear_leaky_relu.scale = float(output_scale)
126
+ qlinear_leaky_relu.zero_point = int(output_zero_point)
127
+ return qlinear_leaky_relu
128
+
129
+
130
+ class LinearTanh(nnq.Linear):
131
+ r"""
132
+ A LinearTanh module fused from Linear and Tanh modules
133
+
134
+ We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
135
+
136
+ Attributes:
137
+ Same as torch.ao.nn.quantized.Linear
138
+
139
+ Examples::
140
+
141
+ >>> # xdoctest: +SKIP
142
+ >>> m = nn.intrinsic.LinearTanh(20, 30)
143
+ >>> input = torch.randn(128, 20)
144
+ >>> output = m(input)
145
+ >>> print(output.size())
146
+ torch.Size([128, 30])
147
+ """
148
+ _FLOAT_MODULE = nni.LinearTanh # type: ignore[assignment]
149
+
150
+ def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
151
+ super().__init__(in_features, out_features, bias, dtype)
152
+
153
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
154
+ return torch.ops.quantized.linear_tanh(
155
+ x, self._packed_params._packed_params, self.scale, self.zero_point
156
+ )
157
+
158
+ def _get_name(self):
159
+ return "QuantizedLinearTanh"
160
+
161
+ @classmethod
162
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
163
+ assert type(mod) == nni.LinearTanh, "Input float module should be LinearTanh"
164
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
165
+ activation_post_process = mod.activation_post_process
166
+ mod = mod[0]
167
+ weight_post_process = mod.qconfig.weight()
168
+ weight_post_process(mod.weight)
169
+ dtype = weight_post_process.dtype
170
+ act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[union-attr,operator]
171
+ assert dtype == torch.qint8, "Weight observer must have dtype torch.qint8"
172
+ qweight = _quantize_weight(mod.weight.float(), weight_post_process)
173
+ qlinear_tanh = cls(mod.in_features, mod.out_features, dtype=dtype)
174
+ qlinear_tanh.set_weight_bias(qweight, mod.bias)
175
+ qlinear_tanh.scale = float(act_scale)
176
+ qlinear_tanh.zero_point = int(act_zp)
177
+ return qlinear_tanh
178
+
179
+ @classmethod
180
+ def from_reference(cls, ref_mod, output_scale, output_zero_point):
181
+ linear = ref_mod[0]
182
+ qlinear_tanh = cls(linear.in_features, linear.out_features)
183
+ qweight = linear.get_quantized_weight()
184
+ qlinear_tanh.set_weight_bias(qweight, linear.bias)
185
+ qlinear_tanh.scale = float(output_scale)
186
+ qlinear_tanh.zero_point = int(output_zero_point)
187
+ return qlinear_tanh
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .linear import Linear
2
+
3
+
4
+ __all__ = ["Linear"]
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (1.28 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
+ __all__ = ["Linear"]
6
+
7
+
8
+ class Linear(torch.ao.nn.qat.Linear):
9
+ r"""
10
+ A linear module attached with FakeQuantize modules for weight,
11
+ used for dynamic quantization aware training.
12
+
13
+ We adopt the same interface as `torch.nn.Linear`, please see
14
+ https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
15
+ for documentation.
16
+
17
+ Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
18
+ default.
19
+ """
20
+
21
+ def __init__(
22
+ self,
23
+ in_features,
24
+ out_features,
25
+ bias=True,
26
+ qconfig=None,
27
+ device=None,
28
+ dtype=None,
29
+ ) -> None:
30
+ super().__init__(in_features, out_features, bias, qconfig, device, dtype)
31
+ if not torch.ao.quantization.qconfig._activation_is_memoryless(qconfig):
32
+ raise ValueError(
33
+ "Dynamic QAT requires a memoryless observer."
34
+ + "This means a MovingAverage observer with averaging constant equal to 1"
35
+ )
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .conv import Conv1d, Conv2d, Conv3d
2
+ from .embedding_ops import Embedding, EmbeddingBag
3
+ from .linear import Linear
4
+
5
+
6
+ __all__ = [
7
+ "Linear",
8
+ "Conv1d",
9
+ "Conv2d",
10
+ "Conv3d",
11
+ "Embedding",
12
+ "EmbeddingBag",
13
+ ]
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (406 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (7 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc ADDED
Binary file (5.5 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (2.88 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/conv.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Tuple, TypeVar, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.ao.nn.intrinsic import _FusedModule
7
+ from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
8
+ from torch.nn.modules.utils import _pair, _single, _triple
9
+
10
+
11
+ __all__ = ["Conv1d", "Conv2d", "Conv3d"]
12
+
13
+ MOD = TypeVar("MOD", bound=nn.modules.conv._ConvNd)
14
+
15
+
16
+ class _ConvNd(nn.modules.conv._ConvNd):
17
+ _FLOAT_MODULE = MOD
18
+
19
+ def __init__(
20
+ self,
21
+ in_channels: int,
22
+ out_channels: int,
23
+ kernel_size: Tuple[int, ...],
24
+ stride: Tuple[int, ...],
25
+ padding: Tuple[int, ...],
26
+ dilation: Tuple[int, ...],
27
+ transposed: bool,
28
+ output_padding: Tuple[int, ...],
29
+ groups: int,
30
+ bias: bool,
31
+ padding_mode: str,
32
+ qconfig=None,
33
+ device=None,
34
+ dtype=None,
35
+ ) -> None:
36
+ factory_kwargs = {"device": device, "dtype": dtype}
37
+ nn.modules.conv._ConvNd.__init__(
38
+ self,
39
+ in_channels,
40
+ out_channels,
41
+ kernel_size,
42
+ stride,
43
+ padding,
44
+ dilation,
45
+ transposed,
46
+ output_padding,
47
+ groups,
48
+ bias,
49
+ padding_mode,
50
+ **factory_kwargs,
51
+ )
52
+ assert qconfig, "qconfig must be provided for QAT module"
53
+ self.qconfig = qconfig
54
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
55
+
56
+ def forward(self, input):
57
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
58
+
59
+ @staticmethod
60
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
61
+ r"""Create a qat module from a float module
62
+
63
+ Args:
64
+ `mod`: a float module, either produced by torch.ao.quantization utilities
65
+ or directly from user
66
+ """
67
+ assert type(mod) == cls._FLOAT_MODULE, (
68
+ "qat."
69
+ + cls.__name__
70
+ + ".from_float only works for "
71
+ + cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
72
+ )
73
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
74
+ assert mod.qconfig, "Input float module must have a valid qconfig"
75
+ if issubclass(type(mod), _FusedModule):
76
+ mod = mod[0] # type: ignore[index]
77
+ qconfig = mod.qconfig
78
+ qat_conv = cls(
79
+ mod.in_channels,
80
+ mod.out_channels,
81
+ mod.kernel_size,
82
+ stride=mod.stride,
83
+ padding=mod.padding,
84
+ dilation=mod.dilation,
85
+ groups=mod.groups,
86
+ bias=mod.bias is not None,
87
+ padding_mode=mod.padding_mode,
88
+ qconfig=qconfig,
89
+ )
90
+ qat_conv.weight = mod.weight
91
+ qat_conv.bias = mod.bias
92
+ return qat_conv
93
+
94
+ def to_float(self):
95
+ """This works for both single qat conv, and the qat conv - relu modules
96
+ to convert the qat module to a floating point module
97
+ """
98
+ cls = type(self)
99
+ conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined, operator]
100
+ self.in_channels,
101
+ self.out_channels,
102
+ self.kernel_size, # type: ignore[arg-type]
103
+ self.stride, # type: ignore[arg-type]
104
+ self.padding, # type: ignore[arg-type]
105
+ self.dilation, # type: ignore[arg-type]
106
+ self.groups,
107
+ self.bias is not None,
108
+ self.padding_mode,
109
+ )
110
+ conv.weight = torch.nn.Parameter(self.weight.detach())
111
+ if self.bias is not None:
112
+ conv.bias = torch.nn.Parameter(self.bias.detach())
113
+ # conv relu
114
+ if issubclass(cls, _FusedModule):
115
+ modules = [conv]
116
+ assert hasattr(cls, "_FLOAT_RELU_MODULE")
117
+ relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
118
+ modules.append(relu)
119
+ fused = cls._FLOAT_MODULE(*modules) # type: ignore[arg-type, attr-defined, operator]
120
+ fused.train(self.training)
121
+ return fused
122
+ else:
123
+ return conv
124
+
125
+
126
+ class Conv1d(_ConvNd, nn.Conv1d):
127
+ r"""
128
+ A Conv1d module attached with FakeQuantize modules for weight,
129
+ used for quantization aware training.
130
+
131
+ We adopt the same interface as :class:`~torch.nn.Conv1d`
132
+
133
+ Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
134
+ default.
135
+
136
+ Attributes:
137
+ weight_fake_quant: fake quant module for weight
138
+ """
139
+ _FLOAT_MODULE = nn.Conv1d
140
+ _FLOAT_CONV_MODULE = nn.Conv1d
141
+
142
+ def __init__(
143
+ self,
144
+ in_channels: int,
145
+ out_channels: int,
146
+ kernel_size: _size_1_t,
147
+ stride: _size_1_t = 1,
148
+ padding: Union[str, _size_1_t] = 0,
149
+ dilation: _size_1_t = 1,
150
+ groups: int = 1,
151
+ bias: bool = True,
152
+ padding_mode: str = "zeros",
153
+ qconfig=None,
154
+ device=None,
155
+ dtype=None,
156
+ ) -> None:
157
+ kernel_size_ = _single(kernel_size)
158
+ stride_ = _single(stride)
159
+ padding_ = padding if isinstance(padding, str) else _single(padding)
160
+ dilation_ = _single(dilation)
161
+ super().__init__(
162
+ in_channels,
163
+ out_channels,
164
+ kernel_size_,
165
+ stride=stride_,
166
+ padding=padding_,
167
+ dilation=dilation_,
168
+ transposed=False,
169
+ output_padding=_single(0),
170
+ groups=groups,
171
+ bias=bias,
172
+ padding_mode=padding_mode,
173
+ qconfig=qconfig,
174
+ device=device,
175
+ dtype=dtype,
176
+ )
177
+
178
+ @classmethod
179
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
180
+ return super().from_float(
181
+ cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
182
+ )
183
+
184
+
185
+ class Conv2d(_ConvNd, nn.Conv2d):
186
+ r"""
187
+ A Conv2d module attached with FakeQuantize modules for weight,
188
+ used for quantization aware training.
189
+
190
+ We adopt the same interface as `torch.nn.Conv2d`, please see
191
+ https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
192
+ for documentation.
193
+
194
+ Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
195
+ default.
196
+
197
+ Attributes:
198
+ weight_fake_quant: fake quant module for weight
199
+ """
200
+ _FLOAT_MODULE = nn.Conv2d
201
+ _FLOAT_CONV_MODULE = nn.Conv2d
202
+
203
+ def __init__(
204
+ self,
205
+ in_channels: int,
206
+ out_channels: int,
207
+ kernel_size: _size_2_t,
208
+ stride: _size_2_t = 1,
209
+ padding: Union[str, _size_2_t] = 0,
210
+ dilation: _size_2_t = 1,
211
+ groups: int = 1,
212
+ bias: bool = True,
213
+ padding_mode: str = "zeros",
214
+ qconfig=None,
215
+ device=None,
216
+ dtype=None,
217
+ ) -> None:
218
+ kernel_size_ = _pair(kernel_size)
219
+ stride_ = _pair(stride)
220
+ padding_ = padding if isinstance(padding, str) else _pair(padding)
221
+ dilation_ = _pair(dilation)
222
+ super().__init__(
223
+ in_channels,
224
+ out_channels,
225
+ kernel_size_,
226
+ stride=stride_,
227
+ padding=padding_,
228
+ dilation=dilation_,
229
+ transposed=False,
230
+ output_padding=_pair(0),
231
+ groups=groups,
232
+ bias=bias,
233
+ padding_mode=padding_mode,
234
+ qconfig=qconfig,
235
+ device=device,
236
+ dtype=dtype,
237
+ )
238
+
239
+ def forward(self, input):
240
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
241
+
242
+ @classmethod
243
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
244
+ return super().from_float(
245
+ cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
246
+ )
247
+
248
+
249
+ class Conv3d(_ConvNd, nn.Conv3d):
250
+ r"""
251
+ A Conv3d module attached with FakeQuantize modules for weight,
252
+ used for quantization aware training.
253
+
254
+ We adopt the same interface as `torch.nn.Conv3d`, please see
255
+ https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
256
+ for documentation.
257
+
258
+ Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
259
+ default.
260
+
261
+ Attributes:
262
+ weight_fake_quant: fake quant module for weight
263
+ """
264
+ _FLOAT_MODULE = nn.Conv3d
265
+ _FLOAT_CONV_MODULE = nn.Conv3d
266
+
267
+ def __init__(
268
+ self,
269
+ in_channels: int,
270
+ out_channels: int,
271
+ kernel_size: _size_3_t,
272
+ stride: _size_3_t = 1,
273
+ padding: Union[str, _size_3_t] = 0,
274
+ dilation: _size_3_t = 1,
275
+ groups: int = 1,
276
+ bias: bool = True,
277
+ padding_mode: str = "zeros",
278
+ qconfig=None,
279
+ device=None,
280
+ dtype=None,
281
+ ) -> None:
282
+ kernel_size_ = _triple(kernel_size)
283
+ stride_ = _triple(stride)
284
+ padding_ = padding if isinstance(padding, str) else _triple(padding)
285
+ dilation_ = _triple(dilation)
286
+ super().__init__(
287
+ in_channels,
288
+ out_channels,
289
+ kernel_size_,
290
+ stride=stride_,
291
+ padding=padding_,
292
+ dilation=dilation_,
293
+ transposed=False,
294
+ output_padding=_triple(0),
295
+ groups=groups,
296
+ bias=bias,
297
+ padding_mode=padding_mode,
298
+ qconfig=qconfig,
299
+ device=device,
300
+ dtype=dtype,
301
+ )
302
+
303
+ def forward(self, input):
304
+ return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
305
+
306
+ @classmethod
307
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
308
+ return super().from_float(
309
+ cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
310
+ )
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/embedding_ops.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from torch import Tensor
6
+
7
+
8
+ __all__ = ["Embedding", "EmbeddingBag"]
9
+
10
+
11
+ class Embedding(nn.Embedding):
12
+ r"""
13
+ An embedding bag module attached with FakeQuantize modules for weight,
14
+ used for quantization aware training.
15
+
16
+ We adopt the same interface as `torch.nn.Embedding`, please see
17
+ https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#torch.nn.Embedding
18
+ for documentation.
19
+
20
+ Similar to `torch.nn.Embedding`, with FakeQuantize modules initialized to
21
+ default.
22
+
23
+ Attributes:
24
+ weight: fake quant module for weight
25
+ """
26
+ _FLOAT_MODULE = nn.Embedding
27
+
28
+ def __init__(
29
+ self,
30
+ num_embeddings,
31
+ embedding_dim,
32
+ padding_idx=None,
33
+ max_norm=None,
34
+ norm_type=2.0,
35
+ scale_grad_by_freq=False,
36
+ sparse=False,
37
+ _weight=None,
38
+ device=None,
39
+ dtype=None,
40
+ qconfig=None,
41
+ ) -> None:
42
+ factory_kwargs = {"device": device, "dtype": dtype}
43
+ super().__init__(
44
+ num_embeddings,
45
+ embedding_dim,
46
+ padding_idx,
47
+ max_norm,
48
+ norm_type,
49
+ scale_grad_by_freq,
50
+ sparse,
51
+ _weight,
52
+ **factory_kwargs,
53
+ )
54
+ assert qconfig, "qconfig must be provided for QAT module"
55
+ assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, (
56
+ "Embedding weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
57
+ + str(qconfig.weight().qscheme)
58
+ )
59
+ self.qconfig = qconfig
60
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
61
+
62
+ def forward(self, input) -> Tensor:
63
+ return F.embedding(
64
+ input,
65
+ self.weight_fake_quant(self.weight),
66
+ self.padding_idx,
67
+ self.max_norm,
68
+ self.norm_type,
69
+ self.scale_grad_by_freq,
70
+ self.sparse,
71
+ )
72
+
73
+ @classmethod
74
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
75
+ r"""Create a qat module from a float module
76
+
77
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
78
+ or directly from user
79
+ """
80
+ assert type(mod) == cls._FLOAT_MODULE, (
81
+ " qat."
82
+ + cls.__name__
83
+ + ".from_float only works for "
84
+ + cls._FLOAT_MODULE.__name__
85
+ )
86
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
87
+ assert mod.qconfig, "Input float module must have a valid qconfig"
88
+ weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
89
+ assert weight_qscheme == torch.per_channel_affine_float_qparams, (
90
+ "Embedding weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
91
+ + str(weight_qscheme)
92
+ )
93
+
94
+ qconfig = mod.qconfig
95
+ qat_embedding_bag = cls(
96
+ mod.num_embeddings,
97
+ mod.embedding_dim,
98
+ mod.padding_idx,
99
+ mod.max_norm,
100
+ mod.norm_type,
101
+ mod.scale_grad_by_freq,
102
+ mod.sparse,
103
+ mod.weight,
104
+ qconfig=qconfig,
105
+ )
106
+
107
+ return qat_embedding_bag
108
+
109
+ def to_float(self):
110
+ embedding_bag = torch.nn.Embedding(
111
+ self.num_embeddings,
112
+ self.embedding_dim,
113
+ self.padding_idx,
114
+ self.max_norm,
115
+ self.norm_type,
116
+ self.scale_grad_by_freq,
117
+ self.sparse,
118
+ None,
119
+ )
120
+ embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
121
+ embedding_bag.train(self.training)
122
+ return embedding_bag
123
+
124
+
125
+ class EmbeddingBag(nn.EmbeddingBag):
126
+ r"""
127
+ An embedding bag module attached with FakeQuantize modules for weight,
128
+ used for quantization aware training.
129
+
130
+ We adopt the same interface as `torch.nn.EmbeddingBag`, please see
131
+ https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html#torch.nn.EmbeddingBag
132
+ for documentation.
133
+
134
+ Similar to `torch.nn.EmbeddingBag`, with FakeQuantize modules initialized to
135
+ default.
136
+
137
+ Attributes:
138
+ weight: fake quant module for weight
139
+ """
140
+ _FLOAT_MODULE = nn.EmbeddingBag
141
+
142
+ def __init__(
143
+ self,
144
+ num_embeddings,
145
+ embedding_dim,
146
+ max_norm=None,
147
+ norm_type=2.0,
148
+ scale_grad_by_freq=False,
149
+ mode="mean",
150
+ sparse=False,
151
+ _weight=None,
152
+ include_last_offset=False,
153
+ padding_idx=None,
154
+ qconfig=None,
155
+ device=None,
156
+ dtype=None,
157
+ ) -> None:
158
+ factory_kwargs = {"device": device, "dtype": dtype}
159
+ super().__init__(
160
+ num_embeddings,
161
+ embedding_dim,
162
+ max_norm,
163
+ norm_type,
164
+ scale_grad_by_freq,
165
+ mode,
166
+ sparse,
167
+ _weight,
168
+ include_last_offset,
169
+ padding_idx,
170
+ **factory_kwargs,
171
+ )
172
+ assert qconfig, "qconfig must be provided for QAT module"
173
+ assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, (
174
+ "Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
175
+ + str(qconfig.weight().qscheme)
176
+ )
177
+ self.qconfig = qconfig
178
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
179
+
180
+ def forward(self, input, offsets=None, per_sample_weights=None) -> Tensor:
181
+ return F.embedding_bag(
182
+ input,
183
+ self.weight_fake_quant(self.weight),
184
+ offsets,
185
+ self.max_norm,
186
+ self.norm_type,
187
+ self.scale_grad_by_freq,
188
+ self.mode,
189
+ self.sparse,
190
+ per_sample_weights,
191
+ self.include_last_offset,
192
+ self.padding_idx,
193
+ )
194
+
195
+ @classmethod
196
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
197
+ r"""Create a qat module from a float module
198
+
199
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
200
+ or directly from user
201
+ """
202
+ assert type(mod) == cls._FLOAT_MODULE, (
203
+ " qat."
204
+ + cls.__name__
205
+ + ".from_float only works for "
206
+ + cls._FLOAT_MODULE.__name__
207
+ )
208
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
209
+ assert mod.qconfig, "Input float module must have a valid qconfig"
210
+ weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
211
+ assert weight_qscheme == torch.per_channel_affine_float_qparams, (
212
+ "Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
213
+ + str(weight_qscheme)
214
+ )
215
+
216
+ qconfig = mod.qconfig
217
+ qat_embedding_bag = cls(
218
+ mod.num_embeddings,
219
+ mod.embedding_dim,
220
+ mod.max_norm,
221
+ mod.norm_type,
222
+ mod.scale_grad_by_freq,
223
+ mod.mode,
224
+ mod.sparse,
225
+ mod.weight,
226
+ mod.include_last_offset,
227
+ mod.padding_idx,
228
+ qconfig=qconfig,
229
+ )
230
+
231
+ return qat_embedding_bag
232
+
233
+ def to_float(self):
234
+ embedding_bag = torch.nn.EmbeddingBag(
235
+ self.num_embeddings,
236
+ self.embedding_dim,
237
+ self.max_norm,
238
+ self.norm_type,
239
+ self.scale_grad_by_freq,
240
+ self.mode,
241
+ self.sparse,
242
+ None,
243
+ self.include_last_offset,
244
+ self.padding_idx,
245
+ )
246
+ embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
247
+ embedding_bag.train(self.training)
248
+ return embedding_bag
videochat2/lib/python3.10/site-packages/torch/ao/nn/qat/modules/linear.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from torch.ao.nn.intrinsic import LinearReLU
6
+ from torch.nn.utils.parametrize import (
7
+ is_parametrized,
8
+ transfer_parametrizations_and_params,
9
+ type_before_parametrizations,
10
+ )
11
+
12
+
13
+ __all__ = ["Linear"]
14
+
15
+
16
+ class Linear(nn.Linear):
17
+ r"""
18
+ A linear module attached with FakeQuantize modules for weight,
19
+ used for quantization aware training.
20
+
21
+ We adopt the same interface as `torch.nn.Linear`, please see
22
+ https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
23
+ for documentation.
24
+
25
+ Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
26
+ default.
27
+
28
+ Attributes:
29
+ weight: fake quant module for weight
30
+ """
31
+ _FLOAT_MODULE = nn.Linear
32
+
33
+ def __init__(
34
+ self,
35
+ in_features,
36
+ out_features,
37
+ bias=True,
38
+ qconfig=None,
39
+ device=None,
40
+ dtype=None,
41
+ ) -> None:
42
+ factory_kwargs = {"device": device, "dtype": dtype}
43
+ super().__init__(in_features, out_features, bias, **factory_kwargs)
44
+ assert qconfig, "qconfig must be provided for QAT module"
45
+ self.qconfig = qconfig
46
+ self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
47
+
48
+ def forward(self, input):
49
+ return F.linear(input, self.weight_fake_quant(self.weight), self.bias)
50
+
51
+ @classmethod
52
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
53
+ r"""Create a qat module from a float module or qparams_dict
54
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
55
+ or directly from user
56
+ """
57
+ assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, (
58
+ " qat."
59
+ + cls.__name__
60
+ + ".from_float only works for "
61
+ + cls._FLOAT_MODULE.__name__
62
+ )
63
+ assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
64
+ assert mod.qconfig, "Input float module must have a valid qconfig"
65
+ if type_before_parametrizations(mod) == LinearReLU:
66
+ mod = mod[0]
67
+
68
+ qconfig = mod.qconfig
69
+ qat_linear = cls(
70
+ mod.in_features,
71
+ mod.out_features,
72
+ bias=mod.bias is not None,
73
+ qconfig=qconfig,
74
+ )
75
+
76
+ if is_parametrized(mod, "weight"):
77
+ transfer_parametrizations_and_params(mod, qat_linear, "weight")
78
+ else:
79
+ qat_linear.weight = mod.weight
80
+
81
+ if is_parametrized(mod, "bias"):
82
+ transfer_parametrizations_and_params(mod, qat_linear, "bias")
83
+ else:
84
+ qat_linear.bias = mod.bias
85
+
86
+ return qat_linear
87
+
88
+ def to_float(self):
89
+ linear = torch.nn.Linear(
90
+ self.in_features, self.out_features, self.bias is not None
91
+ )
92
+ linear.weight = torch.nn.Parameter(self.weight.detach())
93
+ if self.bias is not None:
94
+ linear.bias = torch.nn.Parameter(self.bias.detach())
95
+ linear.train(self.training)
96
+ return linear
videochat2/lib/python3.10/site-packages/torch/ao/ns/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
videochat2/lib/python3.10/site-packages/torch/ao/ns/_numeric_suite.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any, Callable, Dict, List, Optional, Set, Union
3
+
4
+ import torch
5
+ import torch.ao.nn.quantized as nnq
6
+ import torch.ao.nn.quantized.dynamic as nnqd
7
+ import torch.nn as nn
8
+ from torch.ao.quantization import prepare
9
+ from torch.ao.quantization.quantization_mappings import (
10
+ get_default_compare_output_module_list,
11
+ )
12
+
13
+
14
+ NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST = {
15
+ nnqd.Linear,
16
+ nnq.Linear,
17
+ nnqd.LSTM,
18
+ nn.LSTM,
19
+ }
20
+
21
+
22
+ def _find_match(
23
+ str_list: Union[Dict[str, Any], List[str]],
24
+ key_str: str,
25
+ postfix: str,
26
+ ) -> Optional[str]:
27
+ split_str = key_str.split(".")
28
+ if split_str[-1] == postfix:
29
+ match_string = "".join(key_str.split(".")[0:-1])
30
+ for s2 in str_list:
31
+ pattern1 = "".join(s2.split(".")[0:-1])
32
+ pattern2 = "".join(s2.split(".")[0:-2])
33
+ if match_string == pattern1:
34
+ return s2
35
+ if match_string == pattern2:
36
+ return s2
37
+
38
+ # For matching "fc.weight" and "fc._packed_params._packed_params"
39
+ if postfix == "_packed_params":
40
+ match_string = "".join(key_str.split(".")[0:-2])
41
+ if len(match_string) == 0:
42
+ return None
43
+ for s2 in str_list:
44
+ pattern1 = "".join(s2.split(".")[0:-1])
45
+ pattern2 = "".join(s2.split(".")[0:-2])
46
+ if match_string == pattern1:
47
+ return s2
48
+ if match_string == pattern2:
49
+ return s2
50
+ return None
51
+ else:
52
+ return None
53
+
54
+
55
+ def compare_weights(
56
+ float_dict: Dict[str, Any], quantized_dict: Dict[str, Any]
57
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
58
+ r"""Compare the weights of the float module with its corresponding quantized
59
+ module. Return a dict with key corresponding to module names and each entry being
60
+ a dictionary with two keys 'float' and 'quantized', containing the float and
61
+ quantized weights. This dict can be used to compare and compute the quantization
62
+ error of the weights of float and quantized models.
63
+
64
+ Example usage::
65
+
66
+ wt_compare_dict = compare_weights(
67
+ float_model.state_dict(), qmodel.state_dict())
68
+ for key in wt_compare_dict:
69
+ print(
70
+ key,
71
+ compute_error(
72
+ wt_compare_dict[key]['float'],
73
+ wt_compare_dict[key]['quantized'].dequantize()
74
+ )
75
+ )
76
+
77
+ Args:
78
+ float_dict: state dict of the float model
79
+ quantized_dict: state dict of the quantized model
80
+
81
+ Return:
82
+ weight_dict: dict with key corresponding to module names and each entry being
83
+ a dictionary with two keys 'float' and 'quantized', containing the float and
84
+ quantized weights
85
+ """
86
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_weights")
87
+ weight_dict: Dict[str, Dict] = {}
88
+ for key in quantized_dict:
89
+ match_key = _find_match(float_dict, key, "weight")
90
+ if match_key is not None:
91
+ weight_dict[key] = {}
92
+ weight_dict[key]["float"] = float_dict[match_key]
93
+ weight_dict[key]["quantized"] = quantized_dict[key]
94
+ continue
95
+
96
+ # For matching "fc.weight" and "fc._packed_params._packed_params"
97
+ match_key = _find_match(float_dict, key, "_packed_params")
98
+ if match_key is not None:
99
+ weight_dict[key] = {}
100
+ weight_dict[key]["float"] = float_dict[match_key]
101
+ weight_dict[key]["quantized"] = quantized_dict[key][0]
102
+
103
+ # For LSTM
104
+ split_str = key.split(".")
105
+ if split_str[-1] == "param" and split_str[-3] == "_all_weight_values":
106
+ layer = split_str[-2]
107
+ module_name = ".".join(split_str[:-3])
108
+ float_weight_ih_key = module_name + ".weight_ih_l" + layer
109
+ float_weight_hh_key = module_name + ".weight_hh_l" + layer
110
+ if float_weight_ih_key in float_dict and float_weight_hh_key in float_dict:
111
+ weight_dict[key] = {}
112
+ weight_dict[key]["float"] = float_dict[float_weight_ih_key]
113
+ weight_dict[key]["quantized"] = (
114
+ quantized_dict[key].__getstate__()[0][4][0].__getstate__()[0][0]
115
+ )
116
+ weight_dict[key]["float"] = float_dict[float_weight_hh_key]
117
+ weight_dict[key]["quantized"] = (
118
+ quantized_dict[key].__getstate__()[0][4][1].__getstate__()[0][0]
119
+ )
120
+
121
+ return weight_dict
122
+
123
+
124
+ def _get_logger_dict_helper(
125
+ mod: nn.Module,
126
+ target_dict: Dict[str, Any],
127
+ prefix: str = "",
128
+ ) -> None:
129
+ r"""This is the helper function for get_logger_dict
130
+
131
+ Args:
132
+ mod: module we want to save all logger stats
133
+ prefix: prefix for the current module
134
+ target_dict: the dictionary used to save all logger stats
135
+ """
136
+
137
+ def get_prefix(prefix):
138
+ return prefix if prefix == "" else prefix + "."
139
+
140
+ for name, child in mod.named_children():
141
+ if isinstance(child, Logger):
142
+ target_dict[get_prefix(prefix) + "stats"] = child.stats
143
+ break
144
+
145
+ for name, child in mod.named_children():
146
+ module_prefix = get_prefix(prefix) + name if prefix else name
147
+ _get_logger_dict_helper(child, target_dict, module_prefix)
148
+
149
+
150
+ def get_logger_dict(mod: nn.Module, prefix: str = "") -> Dict[str, Dict]:
151
+ r"""Traverse the modules and save all logger stats into target dict.
152
+ This is mainly used for quantization accuracy debug.
153
+
154
+ Type of loggers supported:
155
+ ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module,
156
+ OutputLogger: used to log the outputs of the modules
157
+
158
+ Args:
159
+ mod: module we want to save all logger stats
160
+ prefix: prefix for the current module
161
+
162
+ Return:
163
+ target_dict: the dictionary used to save all logger stats
164
+
165
+ """
166
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.get_logger_dict")
167
+
168
+ target_dict: Dict[str, Dict] = {}
169
+ _get_logger_dict_helper(mod, target_dict, prefix)
170
+ return target_dict
171
+
172
+
173
+ class Logger(nn.Module):
174
+ r"""Base class for stats logging"""
175
+
176
+ def __init__(self):
177
+ super().__init__()
178
+ self.stats = {}
179
+ # We only insert observer if the op is quantized with static quantization,
180
+ # which is identified by activation_observer.dtype == quint8. This is needed
181
+ # when attaching Logger as observer for FX mode
182
+ self.dtype = torch.quint8
183
+
184
+ def forward(self, x):
185
+ # fmt: off
186
+ """
187
+ """ # blank docblock to make autodoc happy
188
+ # fmt: on
189
+
190
+
191
+ class ShadowLogger(Logger):
192
+ r"""Class used in Shadow module to record the outputs of the original and
193
+ shadow modules.
194
+ """
195
+
196
+ def __init__(self):
197
+ super().__init__()
198
+ self.stats["float"] = []
199
+ self.stats["quantized"] = []
200
+
201
+ def forward(self, x, y):
202
+ # fmt: off
203
+ """
204
+ """ # blank docblock to make autodoc happy
205
+ # fmt: on
206
+ if len(x) > 1:
207
+ x = x[0]
208
+ if len(y) > 1:
209
+ y = y[0]
210
+ self.stats["quantized"].append(x.detach())
211
+ self.stats["float"].append(y.detach())
212
+
213
+
214
+ class OutputLogger(Logger):
215
+ r"""Class used to log the outputs of the module"""
216
+
217
+ def __init__(self):
218
+ super().__init__()
219
+ self.stats["tensor_val"] = []
220
+
221
+ def forward(self, x):
222
+ # fmt: off
223
+ """
224
+ """ # blank docblock to make autodoc happy
225
+ # fmt: on
226
+ self.stats["tensor_val"].append(x)
227
+ return x
228
+
229
+
230
+ def _convert_tuple_to_list(t: Any) -> Any:
231
+ return [_convert_tuple_to_list(x) for x in t] if type(t) is tuple else t
232
+
233
+
234
+ def _dequantize_tensor_list(t: Any) -> Any:
235
+ return (
236
+ [_dequantize_tensor_list(x) for x in t]
237
+ if type(t) is list
238
+ else t.dequantize()
239
+ if t.is_quantized
240
+ else t
241
+ )
242
+
243
+
244
+ class Shadow(nn.Module):
245
+ r"""Shadow module attaches the float module to its matching quantized module
246
+ as the shadow. Then it uses Logger module to process the outputs of both
247
+ modules.
248
+
249
+ Args:
250
+ q_module: module quantized from float_module that we want to shadow
251
+ float_module: float module used to shadow q_module
252
+ logger_cls: type of logger used to process the outputs of q_module and
253
+ float_module. ShadowLogger or custom loggers can be used.
254
+ """
255
+
256
+ def __init__(self, q_module, float_module, logger_cls):
257
+ super().__init__()
258
+ self.orig_module = q_module
259
+ self.shadow_module = float_module
260
+ self.dequant = nnq.DeQuantize()
261
+ self.logger = logger_cls()
262
+
263
+ def forward(self, *x) -> torch.Tensor:
264
+ # fmt: off
265
+ """
266
+ """ # blank docblock to make autodoc happy
267
+ # fmt: on
268
+ xl = _convert_tuple_to_list(x)
269
+ output = self.orig_module(*xl)
270
+ xl_float = _dequantize_tensor_list(xl)
271
+ shadow_output = self.shadow_module(*xl_float)
272
+ self.logger(output, shadow_output)
273
+ return output
274
+
275
+ def add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
276
+ # fmt: off
277
+ """
278
+ """ # blank docblock to make autodoc happy
279
+ # fmt: on
280
+ output = self.orig_module.add(x, y)
281
+ x = x.dequantize()
282
+ y = y.dequantize()
283
+ shadow_output = self.shadow_module.add(x, y)
284
+ self.logger(output, shadow_output)
285
+ return output
286
+
287
+ def add_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
288
+ # fmt: off
289
+ """
290
+ """ # blank docblock to make autodoc happy
291
+ # fmt: on
292
+ output = self.orig_module.add_scalar(x, y)
293
+ x = x.dequantize()
294
+ shadow_output = self.shadow_module.add_scalar(x, y)
295
+ self.logger(output, shadow_output)
296
+ return output
297
+
298
+ def mul(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
299
+ # fmt: off
300
+ """
301
+ """ # blank docblock to make autodoc happy
302
+ # fmt: on
303
+ output = self.orig_module.mul(x, y)
304
+ x = x.dequantize()
305
+ y = y.dequantize()
306
+ shadow_output = self.shadow_module.mul(x, y)
307
+ self.logger(output, shadow_output)
308
+ return output
309
+
310
+ def mul_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
311
+ # fmt: off
312
+ """
313
+ """ # blank docblock to make autodoc happy
314
+ # fmt: on
315
+ output = self.orig_module.mul_scalar(x, y)
316
+ x = x.dequantize()
317
+ shadow_output = self.shadow_module.mul_scalar(x, y)
318
+ self.logger(output, shadow_output)
319
+ return output
320
+
321
+ def cat(self, x: List[torch.Tensor], dim: int = 0) -> torch.Tensor:
322
+ # fmt: off
323
+ """
324
+ """ # blank docblock to make autodoc happy
325
+ # fmt: on
326
+ output = self.orig_module.cat(x, dim)
327
+ x = [y.dequantize() for y in x]
328
+ shadow_output = self.shadow_module.cat(x, dim)
329
+ self.logger(output, shadow_output)
330
+ return output
331
+
332
+ def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
333
+ # fmt: off
334
+ """
335
+ """ # blank docblock to make autodoc happy
336
+ # fmt: on
337
+ output = self.orig_module.add_relu(x, y)
338
+ x = x.dequantize()
339
+ y = y.dequantize()
340
+ shadow_output = self.shadow_module.add_relu(x, y)
341
+ self.logger(output, shadow_output)
342
+ return output
343
+
344
+
345
+ def prepare_model_with_stubs(
346
+ float_module: nn.Module,
347
+ q_module: nn.Module,
348
+ module_swap_list: Set[type],
349
+ logger_cls: Callable,
350
+ ) -> None:
351
+ r"""Prepare the model by attaching the float module to its matching quantized
352
+ module as the shadow if the float module type is in module_swap_list.
353
+
354
+ Example usage::
355
+
356
+ prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger)
357
+ q_model(data)
358
+ ob_dict = get_logger_dict(q_model)
359
+
360
+ Args:
361
+ float_module: float module used to generate the q_module
362
+ q_module: module quantized from float_module
363
+ module_swap_list: list of float module types to attach the shadow
364
+ logger_cls: type of logger to be used in shadow module to process the outputs of
365
+ quantized module and its float shadow module
366
+ """
367
+ torch._C._log_api_usage_once(
368
+ "quantization_api._numeric_suite.prepare_model_with_stubs"
369
+ )
370
+
371
+ float_module_children = {}
372
+ for name, mod in float_module.named_children():
373
+ float_module_children[name] = mod
374
+
375
+ reassign = {}
376
+ for name, mod in q_module.named_children():
377
+ if name not in float_module_children:
378
+ continue
379
+
380
+ float_mod = float_module_children[name]
381
+
382
+ if type(float_mod) not in module_swap_list:
383
+ prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls)
384
+
385
+ # Insert shadow module only if the module is not of the same type as
386
+ # the floating point module
387
+ if type(float_mod) in module_swap_list and not _is_identical_module_type(
388
+ mod, float_mod
389
+ ):
390
+ reassign[name] = Shadow(mod, float_mod, logger_cls)
391
+
392
+ for key, value in reassign.items():
393
+ q_module._modules[key] = value
394
+
395
+
396
+ def _is_identical_module_type(mod1, mod2):
397
+ # Compare if two modules have the same dtype
398
+ mod1_module_types = [type(mod) for mod in mod1.modules()]
399
+ mod2_module_types = [type(mod) for mod in mod2.modules()]
400
+ return mod1_module_types == mod2_module_types
401
+
402
+
403
+ def compare_model_stub(
404
+ float_model: nn.Module,
405
+ q_model: nn.Module,
406
+ module_swap_list: Set[type],
407
+ *data,
408
+ logger_cls=ShadowLogger,
409
+ ) -> Dict[str, Dict]:
410
+ r"""Compare quantized module in a model with its floating point counterpart,
411
+ feeding both of them the same input. Return a dict with key corresponding to
412
+ module names and each entry being a dictionary with two keys 'float' and
413
+ 'quantized', containing the output tensors of quantized and its matching
414
+ float shadow module. This dict can be used to compare and compute the module
415
+ level quantization error.
416
+
417
+ This function first call prepare_model_with_stubs() to swap the quantized
418
+ module that we want to compare with the Shadow module, which takes quantized
419
+ module, corresponding float module and logger as input, and creates a forward
420
+ path inside to make the float module to shadow quantized module sharing the
421
+ same input. The logger can be customizable, default logger is ShadowLogger
422
+ and it will save the outputs of the quantized module and float module that
423
+ can be used to compute the module level quantization error.
424
+
425
+ Example usage::
426
+
427
+ module_swap_list = [torchvision.models.quantization.resnet.QuantizableBasicBlock]
428
+ ob_dict = compare_model_stub(float_model,qmodel,module_swap_list, data)
429
+ for key in ob_dict:
430
+ print(key, compute_error(ob_dict[key]['float'], ob_dict[key]['quantized'].dequantize()))
431
+
432
+ Args:
433
+ float_model: float model used to generate the q_model
434
+ q_model: model quantized from float_model
435
+ module_swap_list: list of float module types at which shadow modules will
436
+ be attached.
437
+ data: input data used to run the prepared q_model
438
+ logger_cls: type of logger to be used in shadow module to process the outputs of
439
+ quantized module and its float shadow module
440
+ """
441
+ torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_stub")
442
+ prepare_model_with_stubs(float_model, q_model, module_swap_list, logger_cls)
443
+ q_model(*data)
444
+ ob_dict = get_logger_dict(q_model)
445
+ return ob_dict
446
+
447
+
448
+ def get_matching_activations(
449
+ float_module: nn.Module,
450
+ q_module: nn.Module,
451
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
452
+ r"""Find the matching activation between float and quantized modules.
453
+
454
+ Args:
455
+ float_module: float module used to generate the q_module
456
+ q_module: module quantized from float_module
457
+
458
+ Return:
459
+ act_dict: dict with key corresponding to quantized module names and each
460
+ entry being a dictionary with two keys 'float' and 'quantized', containing
461
+ the matching float and quantized activations
462
+ """
463
+ torch._C._log_api_usage_once(
464
+ "quantization_api._numeric_suite.get_matching_activations"
465
+ )
466
+ float_dict = get_logger_dict(float_module)
467
+ quantized_dict = get_logger_dict(q_module)
468
+ act_dict: Dict[str, Dict] = {}
469
+ for key in quantized_dict:
470
+ if len(quantized_dict[key]["tensor_val"]) == 0:
471
+ continue
472
+ match_key = _find_match(sorted(float_dict, reverse=True), key, "stats")
473
+ if match_key is not None:
474
+ act_dict[key] = {}
475
+ act_dict[key]["float"] = float_dict[match_key]["tensor_val"]
476
+ act_dict[key]["quantized"] = quantized_dict[key]["tensor_val"]
477
+ return act_dict
478
+
479
+
480
+ def prepare_model_outputs(
481
+ float_module: nn.Module,
482
+ q_module: nn.Module,
483
+ logger_cls=OutputLogger,
484
+ allow_list=None,
485
+ ) -> None:
486
+ r"""Prepare the model by attaching the logger to both float module
487
+ and quantized module if they are in the allow_list.
488
+
489
+ Args:
490
+ float_module: float module used to generate the q_module
491
+ q_module: module quantized from float_module
492
+ logger_cls: type of logger to be attached to float_module and q_module
493
+ allow_list: list of module types to attach logger
494
+ """
495
+ torch._C._log_api_usage_once(
496
+ "quantization_api._numeric_suite.prepare_model_outputs"
497
+ )
498
+ if allow_list is None:
499
+ allow_list = get_default_compare_output_module_list()
500
+
501
+ qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None)
502
+ float_module.qconfig = qconfig_debug # type: ignore[assignment]
503
+ prepare(
504
+ float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={}
505
+ )
506
+ q_module.qconfig = qconfig_debug # type: ignore[assignment]
507
+ prepare(
508
+ q_module,
509
+ inplace=True,
510
+ allow_list=allow_list,
511
+ observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
512
+ prepare_custom_config_dict={},
513
+ )
514
+
515
+
516
+ def compare_model_outputs(
517
+ float_model: nn.Module,
518
+ q_model: nn.Module,
519
+ *data,
520
+ logger_cls=OutputLogger,
521
+ allow_list=None,
522
+ ) -> Dict[str, Dict[str, torch.Tensor]]:
523
+ r"""Compare output activations between float and quantized models at
524
+ corresponding locations for the same input. Return a dict with key corresponding
525
+ to quantized module names and each entry being a dictionary with two keys
526
+ 'float' and 'quantized', containing the activations of quantized model and
527
+ float model at matching locations. This dict can be used to compare and
528
+ compute the propagation quantization error.
529
+
530
+ Example usage::
531
+
532
+ act_compare_dict = compare_model_outputs(float_model, qmodel, data)
533
+ for key in act_compare_dict:
534
+ print(
535
+ key,
536
+ compute_error(
537
+ act_compare_dict[key]['float'],
538
+ act_compare_dict[key]['quantized'].dequantize()
539
+ )
540
+ )
541
+
542
+ Args:
543
+ float_model: float model used to generate the q_model
544
+ q_model: model quantized from float_model
545
+ data: input data used to run the prepared float_model and q_model
546
+ logger_cls: type of logger to be attached to float_module and q_module
547
+ allow_list: list of module types to attach logger
548
+
549
+ Return:
550
+ act_compare_dict: dict with key corresponding to quantized module names
551
+ and each entry being a dictionary with two keys 'float' and 'quantized',
552
+ containing the matching float and quantized activations
553
+ """
554
+ torch._C._log_api_usage_once(
555
+ "quantization_api._numeric_suite.compare_model_outputs"
556
+ )
557
+ if allow_list is None:
558
+ allow_list = get_default_compare_output_module_list()
559
+ prepare_model_outputs(float_model, q_model, logger_cls, allow_list)
560
+ float_model(*data)
561
+ q_model(*data)
562
+ act_compare_dict = get_matching_activations(float_model, q_model)
563
+ return act_compare_dict