ZTWHHH committed
Commit 777a113 · verified · 1 Parent(s): 2d5eeb3

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. llava_next/share/locale/pt_BR/LC_MESSAGES/xz.mo +0 -0
  2. llava_next/share/locale/ro/LC_MESSAGES/xz.mo +0 -0
  3. llava_next/share/locale/zh_CN/LC_MESSAGES/xz.mo +0 -0
  4. llava_next/share/terminfo/c/c108-rv +0 -0
  5. llava_next/share/terminfo/c/cgc3 +0 -0
  6. llava_next/share/terminfo/c/cit-80 +0 -0
  7. llava_next/share/terminfo/c/cit101e-n +0 -0
  8. llava_next/share/terminfo/c/cit101e-rv +0 -0
  9. llava_next/share/terminfo/c/concept-avt +0 -0
  10. llava_next/share/terminfo/c/cons60 +0 -0
  11. llava_next/share/terminfo/c/contel321 +0 -0
  12. llava_next/share/terminfo/c/ct82 +0 -0
  13. llava_next/share/terminfo/c/cx +0 -0
  14. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py +37 -0
  15. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py +38 -0
  17. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py +155 -0
  20. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py +1 -0
  21. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py +31 -0
  23. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py +826 -0
  28. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py +172 -0
  29. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_relu.py +49 -0
  30. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py +14 -0
  31. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py +1 -0
  33. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py +6 -0
  35. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py +83 -0
  42. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py +176 -0
  43. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py +178 -0
  44. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py +1 -0
  45. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py +1 -0
  47. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py +3 -0
  48. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py +26 -0
llava_next/share/locale/pt_BR/LC_MESSAGES/xz.mo ADDED
Binary file (31 kB).

llava_next/share/locale/ro/LC_MESSAGES/xz.mo ADDED
Binary file (32.6 kB).

llava_next/share/locale/zh_CN/LC_MESSAGES/xz.mo ADDED
Binary file (29 kB).

llava_next/share/terminfo/c/c108-rv ADDED
Binary file (940 Bytes).

llava_next/share/terminfo/c/cgc3 ADDED
Binary file (170 Bytes).

llava_next/share/terminfo/c/cit-80 ADDED
Binary file (424 Bytes).

llava_next/share/terminfo/c/cit101e-n ADDED
Binary file (853 Bytes).

llava_next/share/terminfo/c/cit101e-rv ADDED
Binary file (1.35 kB).

llava_next/share/terminfo/c/concept-avt ADDED
Binary file (1.23 kB).

llava_next/share/terminfo/c/cons60 ADDED
Binary file (1.5 kB).

llava_next/share/terminfo/c/contel321 ADDED
Binary file (551 Bytes).

llava_next/share/terminfo/c/ct82 ADDED
Binary file (400 Bytes).

llava_next/share/terminfo/c/cx ADDED
Binary file (1.61 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__init__.py ADDED
@@ -0,0 +1,37 @@
+ # mypy: allow-untyped-defs
+ from .modules import * # noqa: F403
+ from .modules.fused import _FusedModule # noqa: F403
+
+ # # Subpackages
+ # from . import qat # noqa: F403
+ # from . import quantized # noqa: F403
+
+ __all__ = [
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearReLU',
+     'BNReLU2d',
+     'BNReLU3d',
+     'LinearBn1d',
+     'LinearLeakyReLU',
+     'LinearTanh',
+     'ConvAdd2d',
+     'ConvAddReLU2d',
+ ]
+
+ # We are exposing all subpackages to the end-user.
+ # Because of possible inter-dependency, we want to avoid
+ # the cyclic imports, thus implementing lazy version
+ # as per https://peps.python.org/pep-0562/
+ def __getattr__(name):
+     if name in __all__:
+         import importlib
+         return importlib.import_module("." + name, __name__)
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
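A minimal usage sketch (illustrative, not part of this commit): the containers exported here are thin nn.Sequential subclasses, so they can also be built by hand from ordinary float modules. The variable names below are illustrative.

    import torch
    import torch.nn as nn
    import torch.ao.nn.intrinsic as nni

    conv = nn.Conv2d(3, 16, kernel_size=3)
    relu = nn.ReLU()
    fused = nni.ConvReLU2d(conv, relu)  # type-checked container; behaves like nn.Sequential(conv, relu)
    out = fused(torch.randn(1, 3, 32, 32))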
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (734 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__init__.py ADDED
@@ -0,0 +1,38 @@
+ from .fused import _FusedModule # noqa: F401
+ from .fused import ConvBn1d
+ from .fused import ConvBn2d
+ from .fused import ConvBn3d
+ from .fused import ConvBnReLU1d
+ from .fused import ConvBnReLU2d
+ from .fused import ConvBnReLU3d
+ from .fused import ConvReLU1d
+ from .fused import ConvReLU2d
+ from .fused import ConvReLU3d
+ from .fused import LinearReLU
+ from .fused import BNReLU2d
+ from .fused import BNReLU3d
+ from .fused import LinearBn1d
+ from .fused import LinearLeakyReLU
+ from .fused import LinearTanh
+ from .fused import ConvAdd2d
+ from .fused import ConvAddReLU2d
+
+ __all__ = [
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearReLU',
+     'BNReLU2d',
+     'BNReLU3d',
+     'LinearBn1d',
+     'LinearLeakyReLU',
+     'LinearTanh',
+     'ConvAdd2d',
+     'ConvAddReLU2d',
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (893 Bytes).

parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc ADDED
Binary file (8.34 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/modules/fused.py ADDED
@@ -0,0 +1,155 @@
+ # mypy: allow-untyped-defs
+ import torch
+ from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
+ from torch.nn.utils.parametrize import type_before_parametrizations
+
+ __all__ = ['ConvReLU1d', 'ConvReLU2d', 'ConvReLU3d', 'LinearReLU', 'ConvBn1d', 'ConvBn2d',
+            'ConvBnReLU1d', 'ConvBnReLU2d', 'ConvBn3d', 'ConvBnReLU3d', 'BNReLU2d', 'BNReLU3d',
+            'LinearBn1d', 'LinearLeakyReLU', 'LinearTanh', 'ConvAdd2d', 'ConvAddReLU2d']
+
+ # Used for identifying intrinsic modules used in quantization
+ class _FusedModule(torch.nn.Sequential):
+     pass
+
+ class ConvReLU1d(_FusedModule):
+     r"""This is a sequential container which calls the Conv1d and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, relu):
+         assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+         super().__init__(conv, relu)
+
+ class ConvReLU2d(_FusedModule):
+     r"""This is a sequential container which calls the Conv2d and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, relu):
+         assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+         super().__init__(conv, relu)
+
+ class ConvReLU3d(_FusedModule):
+     r"""This is a sequential container which calls the Conv3d and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, relu):
+         assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'
+         super().__init__(conv, relu)
+
+ class LinearReLU(_FusedModule):
+     r"""This is a sequential container which calls the Linear and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, linear, relu):
+         assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(relu)}'
+         super().__init__(linear, relu)
+
+ class ConvBn1d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn):
+         assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+         super().__init__(conv, bn)
+
+ class ConvBn2d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn):
+         assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+         super().__init__(conv, bn)
+
+ class ConvBnReLU1d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn, relu):
+         assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and \
+             type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'  # noqa: B950
+         super().__init__(conv, bn, relu)
+
+ class ConvBnReLU2d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn, relu):
+         assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d and \
+             type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'  # noqa: B950
+         super().__init__(conv, bn, relu)
+
+ class ConvBn3d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn):
+         assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d, \
+             f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}'
+         super().__init__(conv, bn)
+
+ class ConvBnReLU3d(_FusedModule):
+     r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, bn, relu):
+         assert type_before_parametrizations(conv) == Conv3d and type_before_parametrizations(bn) == BatchNorm3d and \
+             type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'  # noqa: B950
+         super().__init__(conv, bn, relu)
+
+
+ class BNReLU2d(_FusedModule):
+     r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, batch_norm, relu):
+         assert type_before_parametrizations(batch_norm) == BatchNorm2d and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_parametrizations(relu)}'
+         super().__init__(batch_norm, relu)
+
+ class BNReLU3d(_FusedModule):
+     r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, batch_norm, relu):
+         assert type_before_parametrizations(batch_norm) == BatchNorm3d and type_before_parametrizations(relu) == ReLU, \
+             f'Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_parametrizations(relu)}'
+         super().__init__(batch_norm, relu)
+
+
+ class LinearBn1d(_FusedModule):
+     r"""This is a sequential container which calls the Linear and BatchNorm1d modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, linear, bn):
+         assert type_before_parametrizations(linear) == Linear and type_before_parametrizations(bn) == BatchNorm1d, \
+             f'Incorrect types for input modules{type_before_parametrizations(linear)}{type_before_parametrizations(bn)}'
+         super().__init__(linear, bn)
+
+ class LinearLeakyReLU(_FusedModule):
+     r"""This is a sequential container which calls the Linear and LeakyReLU modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, linear, leaky_relu):
+         assert type(linear) == Linear and type(leaky_relu) == torch.nn.LeakyReLU, \
+             f'Incorrect types for input modules{type(linear)}{type(leaky_relu)}'
+         super().__init__(linear, leaky_relu)
+
+ class LinearTanh(_FusedModule):
+     r"""This is a sequential container which calls the Linear and Tanh modules.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, linear, tanh):
+         assert type(linear) == Linear and type(tanh) == torch.nn.Tanh, \
+             f'Incorrect types for input modules{type(linear)}{type(tanh)}'
+         super().__init__(linear, tanh)
+
+ class ConvAdd2d(_FusedModule):
+     r"""This is a sequential container which calls the Conv2d modules with extra Add.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, add):
+         super().__init__(conv)
+         self.add = add
+
+     def forward(self, x1, x2):
+         return self.add(self[0](x1), x2)
+
+ class ConvAddReLU2d(_FusedModule):
+     r"""This is a sequential container which calls the Conv2d, add, Relu.
+     During quantization this will be replaced with the corresponding fused module."""
+     def __init__(self, conv, add, relu):
+         super().__init__(conv)
+         self.add = add
+         self.relu = relu
+
+     def forward(self, x1, x2):
+         return self.relu(self.add(self[0](x1), x2))
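A minimal usage sketch (illustrative, not part of this commit): these containers are usually produced by the eager-mode fusion utility rather than instantiated directly. The sketch assumes the standard torch.ao.quantization.fuse_modules helper; the module layout is illustrative.

    import torch
    import torch.nn as nn
    from torch.ao.quantization import fuse_modules

    model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU()).eval()
    fused = fuse_modules(model, [["0", "1", "2"]])
    # fused[0] is now a ConvReLU2d container (BN folded into the conv weights);
    # fused[1] and fused[2] are replaced with nn.Identity.
    print(type(fused[0]))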
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import * # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (201 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from .linear_relu import LinearReLU
+ from .linear_fused import LinearBn1d
+ from .conv_fused import (
+     ConvBn1d,
+     ConvBn2d,
+     ConvBn3d,
+     ConvBnReLU1d,
+     ConvBnReLU2d,
+     ConvBnReLU3d,
+     ConvReLU1d,
+     ConvReLU2d,
+     ConvReLU3d,
+     update_bn_stats,
+     freeze_bn_stats,
+ )
+
+ __all__ = [
+     "LinearReLU",
+     "LinearBn1d",
+     "ConvReLU1d",
+     "ConvReLU2d",
+     "ConvReLU3d",
+     "ConvBn1d",
+     "ConvBn2d",
+     "ConvBn3d",
+     "ConvBnReLU1d",
+     "ConvBnReLU2d",
+     "ConvBnReLU3d",
+     "update_bn_stats",
+     "freeze_bn_stats",
+ ]
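A minimal usage sketch (illustrative, not part of this commit): update_bn_stats and freeze_bn_stats re-exported above are plain functions meant to be broadcast over a QAT-prepared model with Module.apply; qat_model is an illustrative name for such a model.

    from torch.ao.nn.intrinsic.qat import freeze_bn_stats, update_bn_stats

    qat_model.apply(update_bn_stats)  # let the fused ConvBn* modules track batch statistics
    # ... train for some epochs ...
    qat_model.apply(freeze_bn_stats)  # then freeze BN statistics for the remaining QAT epochs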
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (629 Bytes).

parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc ADDED
Binary file (19.3 kB).

parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc ADDED
Binary file (4.96 kB).

parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (2.2 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/conv_fused.py ADDED
@@ -0,0 +1,826 @@
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.ao.nn.intrinsic as nni
6
+ import torch.ao.nn.qat as nnqat
7
+ import torch.nn.functional as F
8
+ from torch.nn import init
9
+ from torch.nn.utils import fuse_conv_bn_weights
10
+ from torch.nn.modules.utils import _single, _pair, _triple
11
+ from torch.nn.parameter import Parameter
12
+ from typing import TypeVar
13
+
14
+ __all__ = ['ConvBn1d', 'ConvBnReLU1d', 'ConvReLU1d', 'ConvBn2d', 'ConvBnReLU2d', 'ConvReLU2d', 'ConvBn3d',
15
+ 'ConvBnReLU3d', 'ConvReLU3d', 'update_bn_stats', 'freeze_bn_stats']
16
+ _BN_CLASS_MAP = {
17
+ 1: nn.BatchNorm1d,
18
+ 2: nn.BatchNorm2d,
19
+ 3: nn.BatchNorm3d,
20
+ }
21
+
22
+
23
+ MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
24
+
25
+
26
+ class _ConvBnNd(nn.modules.conv._ConvNd, nni._FusedModule):
27
+
28
+ _version = 2
29
+ _FLOAT_MODULE = MOD
30
+
31
+ def __init__(self,
32
+ # ConvNd args
33
+ in_channels, out_channels, kernel_size, stride,
34
+ padding, dilation, transposed, output_padding,
35
+ groups,
36
+ bias,
37
+ padding_mode,
38
+ # BatchNormNd args
39
+ # num_features: out_channels
40
+ eps=1e-05, momentum=0.1,
41
+ # affine: True
42
+ # track_running_stats: True
43
+ # Args for this module
44
+ freeze_bn=False,
45
+ qconfig=None,
46
+ dim=2):
47
+ nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
48
+ stride, padding, dilation, transposed,
49
+ output_padding, groups, False, padding_mode)
50
+ assert qconfig, 'qconfig must be provided for QAT module'
51
+ self.qconfig = qconfig
52
+ self.freeze_bn = freeze_bn if self.training else True
53
+ self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
54
+ self.weight_fake_quant = self.qconfig.weight()
55
+ if bias:
56
+ self.bias = Parameter(torch.empty(out_channels))
57
+ else:
58
+ self.register_parameter('bias', None)
59
+ self.reset_bn_parameters()
60
+
61
+ # this needs to be called after reset_bn_parameters,
62
+ # as they modify the same state
63
+ if self.training:
64
+ if freeze_bn:
65
+ self.freeze_bn_stats()
66
+ else:
67
+ self.update_bn_stats()
68
+ else:
69
+ self.freeze_bn_stats()
70
+
71
+ self._enable_slow_path_for_better_numerical_stability = False
72
+
73
+ def reset_running_stats(self):
74
+ self.bn.reset_running_stats()
75
+
76
+ def reset_bn_parameters(self):
77
+ self.bn.reset_running_stats()
78
+ init.uniform_(self.bn.weight)
79
+ init.zeros_(self.bn.bias)
80
+ # note: below is actually for conv, not BN
81
+ if self.bias is not None:
82
+ fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
83
+ bound = 1 / math.sqrt(fan_in)
84
+ init.uniform_(self.bias, -bound, bound)
85
+
86
+ def reset_parameters(self):
87
+ super().reset_parameters()
88
+
89
+ def update_bn_stats(self):
90
+ self.freeze_bn = False
91
+ self.bn.training = True
92
+ return self
93
+
94
+ def freeze_bn_stats(self):
95
+ self.freeze_bn = True
96
+ self.bn.training = False
97
+ return self
98
+
99
+ def _forward(self, input):
100
+ if self._enable_slow_path_for_better_numerical_stability:
101
+ return self._forward_slow(input)
102
+ return self._forward_approximate(input)
103
+
104
+ def _forward_approximate(self, input):
105
+ """Approximated method to fuse conv and bn. It requires only one forward pass.
106
+ conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std
107
+ """
108
+ assert self.bn.running_var is not None
109
+ running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
110
+ scale_factor = self.bn.weight / running_std
111
+ weight_shape = [1] * len(self.weight.shape)
112
+ weight_shape[0] = -1
113
+ bias_shape = [1] * len(self.weight.shape)
114
+ bias_shape[1] = -1
115
+ scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
116
+ # using zero bias here since the bias for original conv
117
+ # will be added later
118
+ if self.bias is not None:
119
+ zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
120
+ else:
121
+ zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device, dtype=input.dtype)
122
+ conv = self._conv_forward(input, scaled_weight, zero_bias)
123
+ conv_orig = conv / scale_factor.reshape(bias_shape)
124
+ if self.bias is not None:
125
+ conv_orig = conv_orig + self.bias.reshape(bias_shape)
126
+ conv = self.bn(conv_orig)
127
+ return conv
128
+
129
+ def _forward_slow(self, input):
130
+ """
131
+ A more accurate but slow method to compute conv bn fusion, following https://arxiv.org/pdf/1806.08342.pdf
132
+ It requires two forward passes but handles the case bn.weight == 0
133
+
134
+ Conv: Y = WX + B_c
135
+ Conv without bias: Y0 = WX = Y - B_c, Y = Y0 + B_c
136
+
137
+ Batch statistics:
138
+ mean_Y = Y.mean()
139
+ = Y0.mean() + B_c
140
+ var_Y = (Y - mean_Y)^2.mean()
141
+ = (Y0 - Y0.mean())^2.mean()
142
+ BN (r: bn.weight, beta: bn.bias):
143
+ Z = r * (Y - mean_Y) / sqrt(var_Y + eps) + beta
144
+ = r * (Y0 - Y0.mean()) / sqrt(var_Y + eps) + beta
145
+
146
+ Fused Conv BN training (std_Y = sqrt(var_Y + eps)):
147
+ Z = (r * W / std_Y) * X + r * (B_c - mean_Y) / std_Y + beta
148
+ = (r * W / std_Y) * X - r * Y0.mean() / std_Y + beta
149
+
150
+ Fused Conv BN inference (running_std = sqrt(running_var + eps)):
151
+ Z = (r * W / running_std) * X - r * (running_mean - B_c) / running_std + beta
152
+
153
+ QAT with fused conv bn:
154
+ Z_train = fake_quant(r * W / running_std) * X * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
155
+ = conv(X, fake_quant(r * W / running_std)) * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
156
+ Z_inference = conv(X, fake_quant(r * W / running_std)) - r * (running_mean - B_c) / running_std + beta
157
+ """
158
+
159
+ assert self.bn.running_var is not None
160
+ assert self.bn.running_mean is not None
161
+
162
+ # using zero bias here since the bias for original conv
163
+ # will be added later
164
+ zero_bias = torch.zeros(self.out_channels, device=self.weight.device, dtype=input.dtype)
165
+
166
+ weight_shape = [1] * len(self.weight.shape)
167
+ weight_shape[0] = -1
168
+ bias_shape = [1] * len(self.weight.shape)
169
+ bias_shape[1] = -1
170
+
171
+ if self.bn.training:
172
+ # needed to compute batch mean/std
173
+ conv_out = self._conv_forward(input, self.weight, zero_bias)
174
+ # update bn statistics
175
+ with torch.no_grad():
176
+ conv_out_bias = (
177
+ conv_out if self.bias is None else conv_out + self.bias.reshape(bias_shape)
178
+ )
179
+ self.bn(conv_out_bias)
180
+
181
+ # fused conv + bn without bias using bn running statistics
182
+ running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
183
+ scale_factor = self.bn.weight / running_std
184
+ scaled_weight = self.weight_fake_quant(
185
+ self.weight * scale_factor.reshape(weight_shape)
186
+ )
187
+ # fused conv without bias for inference: (r * W / running_std) * X
188
+ conv_bn = self._conv_forward(input, scaled_weight, zero_bias)
189
+
190
+ if self.bn.training:
191
+ avg_dims = [0] + list(range(2, len(self.weight.shape)))
192
+ batch_mean = conv_out.mean(avg_dims) # type: ignore[possibly-undefined]
193
+ batch_var = torch.square(conv_out - batch_mean.reshape(bias_shape)).mean(
194
+ avg_dims
195
+ )
196
+ batch_std = torch.sqrt(batch_var + self.bn.eps)
197
+
198
+ # scale to use batch std in training mode
199
+ # conv(X, r * W / std_Y) = conv(X, r * W / running_std) * (running_std / std_Y)
200
+ unscale_factor = running_std / batch_std
201
+ conv_bn *= unscale_factor.reshape(bias_shape)
202
+
203
+ fused_mean = batch_mean
204
+ fused_std = batch_std
205
+ else:
206
+ fused_mean = self.bn.running_mean - (self.bias if self.bias is not None else 0)
207
+ fused_std = running_std
208
+
209
+ # fused bias = beta - r * mean / std
210
+ fused_bias = self.bn.bias - self.bn.weight * fused_mean / fused_std
211
+ conv_bn += fused_bias.reshape(bias_shape)
212
+
213
+ # HACK to let conv bias participate in loss to avoid DDP error (parameters
214
+ # were not used in producing loss)
215
+ if self.bias is not None:
216
+ conv_bn += (self.bias - self.bias).reshape(bias_shape)
217
+
218
+ return conv_bn
219
+
220
+ def extra_repr(self):
221
+ # TODO(jerryzh): extend
222
+ return super().extra_repr()
223
+
224
+ def forward(self, input):
225
+ return self._forward(input)
226
+
227
+ def train(self, mode=True):
228
+ """
229
+ Batchnorm's training behavior is using the self.training flag. Prevent
230
+ changing it if BN is frozen. This makes sure that calling `model.train()`
231
+ on a model with a frozen BN will behave properly.
232
+ """
233
+ self.training = mode
234
+ if not self.freeze_bn:
235
+ for module in self.children():
236
+ module.train(mode)
237
+ return self
238
+
239
+ # ===== Serialization version history =====
240
+ #
241
+ # Version 1/None
242
+ # self
243
+ # |--- weight : Tensor
244
+ # |--- bias : Tensor
245
+ # |--- gamma : Tensor
246
+ # |--- beta : Tensor
247
+ # |--- running_mean : Tensor
248
+ # |--- running_var : Tensor
249
+ # |--- num_batches_tracked : Tensor
250
+ #
251
+ # Version 2
252
+ # self
253
+ # |--- weight : Tensor
254
+ # |--- bias : Tensor
255
+ # |--- bn : Module
256
+ # |--- weight : Tensor (moved from v1.self.gamma)
257
+ # |--- bias : Tensor (moved from v1.self.beta)
258
+ # |--- running_mean : Tensor (moved from v1.self.running_mean)
259
+ # |--- running_var : Tensor (moved from v1.self.running_var)
260
+ # |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
261
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
262
+ version = local_metadata.get('version', None)
263
+ if version is None or version == 1:
264
+ # BN related parameters and buffers were moved into the BN module for v2
265
+ v2_to_v1_names = {
266
+ 'bn.weight': 'gamma',
267
+ 'bn.bias': 'beta',
268
+ 'bn.running_mean': 'running_mean',
269
+ 'bn.running_var': 'running_var',
270
+ 'bn.num_batches_tracked': 'num_batches_tracked',
271
+ }
272
+ for v2_name, v1_name in v2_to_v1_names.items():
273
+ if prefix + v1_name in state_dict:
274
+ state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
275
+ state_dict.pop(prefix + v1_name)
276
+ elif prefix + v2_name in state_dict:
277
+ # there was a brief period where forward compatibility
278
+ # for this module was broken (between
279
+ # https://github.com/pytorch/pytorch/pull/38478
280
+ # and https://github.com/pytorch/pytorch/pull/38820)
281
+ # and modules emitted the v2 state_dict format while
282
+ # specifying that version == 1. This patches the forward
283
+ # compatibility issue by allowing the v2 style entries to
284
+ # be used.
285
+ pass
286
+ elif strict:
287
+ missing_keys.append(prefix + v2_name)
288
+
289
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, strict,
290
+ missing_keys, unexpected_keys, error_msgs)
291
+
292
+ @classmethod
293
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
294
+ r"""Create a qat module from a float module or qparams_dict
295
+
296
+ Args: `mod` a float module, either produced by torch.ao.quantization utilities
297
+ or directly from user
298
+ """
299
+ # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
300
+ # has no __name__ (code is fine though)
301
+ assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
302
+ cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
303
+ assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
304
+ assert mod.qconfig, 'Input float module must have a valid qconfig'
305
+ qconfig = mod.qconfig
306
+ conv, bn = mod[0], mod[1]
307
+ qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
308
+ conv.stride, conv.padding, conv.dilation,
309
+ conv.groups, conv.bias is not None,
310
+ conv.padding_mode,
311
+ bn.eps, bn.momentum,
312
+ False,
313
+ qconfig)
314
+ qat_convbn.weight = conv.weight
315
+ qat_convbn.bias = conv.bias
316
+ qat_convbn.bn.weight = bn.weight
317
+ qat_convbn.bn.bias = bn.bias
318
+ qat_convbn.bn.running_mean = bn.running_mean
319
+ qat_convbn.bn.running_var = bn.running_var
320
+ # mypy error: Cannot determine type of 'num_batches_tracked'
321
+ qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked # type: ignore[has-type]
322
+ return qat_convbn
323
+
324
+ def to_float(self):
325
+ cls = type(self)
326
+ conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined]
327
+ self.in_channels,
328
+ self.out_channels,
329
+ self.kernel_size,
330
+ self.stride,
331
+ self.padding,
332
+ self.dilation,
333
+ self.groups,
334
+ self.bias is not None,
335
+ self.padding_mode)
336
+ conv.weight = torch.nn.Parameter(self.weight.detach())
337
+ if self.bias is not None:
338
+ conv.bias = torch.nn.Parameter(self.bias.detach())
339
+
340
+ if cls._FLOAT_BN_MODULE: # type: ignore[attr-defined]
341
+ # fuse bn into conv
342
+ assert self.bn.running_var is not None and self.bn.running_mean is not None
343
+ conv.weight, conv.bias = fuse_conv_bn_weights(
344
+ conv.weight,
345
+ conv.bias,
346
+ self.bn.running_mean,
347
+ self.bn.running_var,
348
+ self.bn.eps,
349
+ self.bn.weight,
350
+ self.bn.bias
351
+ )
352
+
353
+ if cls._FLOAT_RELU_MODULE: # type: ignore[attr-defined]
354
+ modules = []
355
+ modules.append(conv)
356
+ relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
357
+ modules.append(relu)
358
+ conv_relu = cls._FUSED_FLOAT_MODULE(*modules) # type: ignore[attr-defined]
359
+ conv_relu.train(self.training)
360
+ return conv_relu
361
+ else:
362
+ conv.train(self.training)
363
+ return conv
364
+
365
+ class ConvBn1d(_ConvBnNd, nn.Conv1d):
366
+ r"""
367
+ A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
368
+ attached with FakeQuantize modules for weight,
369
+ used in quantization aware training.
370
+
371
+ We combined the interface of :class:`torch.nn.Conv1d` and
372
+ :class:`torch.nn.BatchNorm1d`.
373
+
374
+ Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
375
+ to default.
376
+
377
+ Attributes:
378
+ freeze_bn:
379
+ weight_fake_quant: fake quant module for weight
380
+
381
+ """
382
+ _FLOAT_BN_MODULE = nn.BatchNorm1d
383
+ _FLOAT_RELU_MODULE: None = None
384
+ _FLOAT_MODULE = nni.ConvBn1d
385
+ _FLOAT_CONV_MODULE = nn.Conv1d
386
+
387
+ def __init__(self,
388
+ # Conv1d args
389
+ in_channels, out_channels, kernel_size, stride=1,
390
+ padding=0, dilation=1, groups=1,
391
+ bias=None,
392
+ padding_mode='zeros',
393
+ # BatchNorm1d args
394
+ # num_features: out_channels
395
+ eps=1e-05, momentum=0.1,
396
+ # affine: True
397
+ # track_running_stats: True
398
+ # Args for this module
399
+ freeze_bn=False,
400
+ qconfig=None):
401
+ kernel_size = _single(kernel_size)
402
+ stride = _single(stride)
403
+ padding = _single(padding)
404
+ dilation = _single(dilation)
405
+ _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
406
+ padding, dilation, False, _single(0), groups, bias, padding_mode,
407
+ eps, momentum, freeze_bn, qconfig, dim=1)
408
+
409
+ class ConvBnReLU1d(ConvBn1d):
410
+ r"""
411
+ A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU,
412
+ attached with FakeQuantize modules for weight,
413
+ used in quantization aware training.
414
+
415
+ We combined the interface of :class:`torch.nn.Conv1d` and
416
+ :class:`torch.nn.BatchNorm1d` and :class:`torch.nn.ReLU`.
417
+
418
+ Similar to `torch.nn.Conv1d`, with FakeQuantize modules initialized to
419
+ default.
420
+
421
+ Attributes:
422
+ weight_fake_quant: fake quant module for weight
423
+
424
+ """
425
+ # base class defines _FLOAT_MODULE as "ConvBn1d"
426
+ _FLOAT_MODULE = nni.ConvBnReLU1d # type: ignore[assignment]
427
+ _FLOAT_CONV_MODULE = nn.Conv1d
428
+ _FLOAT_BN_MODULE = nn.BatchNorm1d
429
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
430
+ # module class after fusing bn into conv
431
+ _FUSED_FLOAT_MODULE = nni.ConvReLU1d
432
+
433
+ def __init__(self,
434
+ # Conv1d args
435
+ in_channels, out_channels, kernel_size, stride=1,
436
+ padding=0, dilation=1, groups=1,
437
+ bias=None,
438
+ padding_mode='zeros',
439
+ # BatchNorm1d args
440
+ # num_features: out_channels
441
+ eps=1e-05, momentum=0.1,
442
+ # affine: True
443
+ # track_running_stats: True
444
+ # Args for this module
445
+ freeze_bn=False,
446
+ qconfig=None):
447
+ super().__init__(in_channels, out_channels, kernel_size, stride,
448
+ padding, dilation, groups, bias,
449
+ padding_mode, eps, momentum,
450
+ freeze_bn,
451
+ qconfig)
452
+
453
+ def forward(self, input):
454
+ return F.relu(ConvBn1d._forward(self, input))
455
+
456
+ @classmethod
457
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
458
+ return super().from_float(mod, use_precomputed_fake_quant)
459
+
460
+ class ConvReLU1d(nnqat.Conv1d, nni._FusedModule):
461
+ r"""A ConvReLU1d module is a fused module of Conv1d and ReLU, attached with
462
+ FakeQuantize modules for weight for
463
+ quantization aware training.
464
+
465
+ We combined the interface of :class:`~torch.nn.Conv1d` and
466
+ :class:`~torch.nn.BatchNorm1d`.
467
+
468
+ Attributes:
469
+ weight_fake_quant: fake quant module for weight
470
+
471
+ """
472
+ _FLOAT_MODULE = nni.ConvReLU1d # type: ignore[assignment]
473
+ _FLOAT_CONV_MODULE = nn.Conv1d
474
+ _FLOAT_BN_MODULE: None = None
475
+ _FLOAT_RELU_MODULE = nn.ReLU
476
+
477
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
478
+ padding=0, dilation=1, groups=1,
479
+ bias=True, padding_mode='zeros',
480
+ qconfig=None):
481
+ super().__init__(in_channels, out_channels, kernel_size,
482
+ stride=stride, padding=padding, dilation=dilation,
483
+ groups=groups, bias=bias, padding_mode=padding_mode,
484
+ qconfig=qconfig)
485
+ assert qconfig, 'qconfig must be provided for QAT module'
486
+ self.qconfig = qconfig
487
+ self.weight_fake_quant = self.qconfig.weight()
488
+
489
+ def forward(self, input):
490
+ return F.relu(
491
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
492
+
493
+ @classmethod
494
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
495
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
496
+
497
+ class ConvBn2d(_ConvBnNd, nn.Conv2d):
498
+ r"""
499
+ A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
500
+ attached with FakeQuantize modules for weight,
501
+ used in quantization aware training.
502
+
503
+ We combined the interface of :class:`torch.nn.Conv2d` and
504
+ :class:`torch.nn.BatchNorm2d`.
505
+
506
+ Similar to :class:`torch.nn.Conv2d`, with FakeQuantize modules initialized
507
+ to default.
508
+
509
+ Attributes:
510
+ freeze_bn:
511
+ weight_fake_quant: fake quant module for weight
512
+
513
+ """
514
+ _FLOAT_MODULE = nni.ConvBn2d
515
+ _FLOAT_CONV_MODULE = nn.Conv2d
516
+ _FLOAT_BN_MODULE = nn.BatchNorm2d
517
+ _FLOAT_RELU_MODULE: None = None
518
+
519
+ def __init__(self,
520
+ # ConvNd args
521
+ in_channels, out_channels, kernel_size, stride=1,
522
+ padding=0, dilation=1, groups=1,
523
+ bias=None,
524
+ padding_mode='zeros',
525
+ # BatchNorm2d args
526
+ # num_features: out_channels
527
+ eps=1e-05, momentum=0.1,
528
+ # affine: True
529
+ # track_running_stats: True
530
+ # Args for this module
531
+ freeze_bn=False,
532
+ qconfig=None):
533
+ kernel_size = _pair(kernel_size)
534
+ stride = _pair(stride)
535
+ padding = _pair(padding)
536
+ dilation = _pair(dilation)
537
+ _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
538
+ padding, dilation, False, _pair(0), groups, bias, padding_mode,
539
+ eps, momentum, freeze_bn, qconfig, dim=2)
540
+
541
+ class ConvBnReLU2d(ConvBn2d):
542
+ r"""
543
+ A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
544
+ attached with FakeQuantize modules for weight,
545
+ used in quantization aware training.
546
+
547
+ We combined the interface of :class:`torch.nn.Conv2d` and
548
+ :class:`torch.nn.BatchNorm2d` and :class:`torch.nn.ReLU`.
549
+
550
+ Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
551
+ default.
552
+
553
+ Attributes:
554
+ weight_fake_quant: fake quant module for weight
555
+
556
+ """
557
+ # base class defines _FLOAT_MODULE as "ConvBn2d"
558
+ _FLOAT_MODULE = nni.ConvBnReLU2d # type: ignore[assignment]
559
+ _FLOAT_CONV_MODULE = nn.Conv2d
560
+ _FLOAT_BN_MODULE = nn.BatchNorm2d
561
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
562
+ # module class after fusing bn into conv
563
+ _FUSED_FLOAT_MODULE = nni.ConvReLU2d
564
+
565
+ def __init__(self,
566
+ # Conv2d args
567
+ in_channels, out_channels, kernel_size, stride=1,
568
+ padding=0, dilation=1, groups=1,
569
+ bias=None,
570
+ padding_mode='zeros',
571
+ # BatchNorm2d args
572
+ # num_features: out_channels
573
+ eps=1e-05, momentum=0.1,
574
+ # affine: True
575
+ # track_running_stats: True
576
+ # Args for this module
577
+ freeze_bn=False,
578
+ qconfig=None):
579
+ super().__init__(in_channels, out_channels, kernel_size, stride,
580
+ padding, dilation, groups, bias,
581
+ padding_mode, eps, momentum,
582
+ freeze_bn,
583
+ qconfig)
584
+
585
+ def forward(self, input):
586
+ return F.relu(ConvBn2d._forward(self, input))
587
+
588
+ @classmethod
589
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
590
+ return super().from_float(mod, use_precomputed_fake_quant)
591
+
592
+ class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):
593
+ r"""A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
594
+ FakeQuantize modules for weight for
595
+ quantization aware training.
596
+
597
+ We combined the interface of :class:`~torch.nn.Conv2d` and
598
+ :class:`~torch.nn.BatchNorm2d`.
599
+
600
+ Attributes:
601
+ weight_fake_quant: fake quant module for weight
602
+
603
+ """
604
+ _FLOAT_MODULE = nni.ConvReLU2d # type: ignore[assignment]
605
+ _FLOAT_CONV_MODULE = nn.Conv2d
606
+ _FLOAT_BN_MODULE: None = None
607
+ _FLOAT_RELU_MODULE = nn.ReLU
608
+
609
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
610
+ padding=0, dilation=1, groups=1,
611
+ bias=True, padding_mode='zeros',
612
+ qconfig=None):
613
+ super().__init__(in_channels, out_channels, kernel_size,
614
+ stride=stride, padding=padding, dilation=dilation,
615
+ groups=groups, bias=bias, padding_mode=padding_mode,
616
+ qconfig=qconfig)
617
+ assert qconfig, 'qconfig must be provided for QAT module'
618
+ self.qconfig = qconfig
619
+ self.weight_fake_quant = self.qconfig.weight()
620
+
621
+ def forward(self, input):
622
+ return F.relu(
623
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))
624
+
625
+ @classmethod
626
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
627
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
628
+
629
+ class ConvBn3d(_ConvBnNd, nn.Conv3d):
630
+ r"""
631
+ A ConvBn3d module is a module fused from Conv3d and BatchNorm3d,
632
+ attached with FakeQuantize modules for weight,
633
+ used in quantization aware training.
634
+
635
+ We combined the interface of :class:`torch.nn.Conv3d` and
636
+ :class:`torch.nn.BatchNorm3d`.
637
+
638
+ Similar to :class:`torch.nn.Conv3d`, with FakeQuantize modules initialized
639
+ to default.
640
+
641
+ Attributes:
642
+ freeze_bn:
643
+ weight_fake_quant: fake quant module for weight
644
+
645
+ """
646
+ _FLOAT_MODULE = nni.ConvBn3d
647
+ _FLOAT_CONV_MODULE = nn.Conv3d
648
+ _FLOAT_BN_MODULE = nn.BatchNorm3d
649
+ _FLOAT_RELU_MODULE: None = None
650
+
651
+ def __init__(
652
+ self,
653
+ # ConvNd args
654
+ in_channels,
655
+ out_channels,
656
+ kernel_size,
657
+ stride=1,
658
+ padding=0,
659
+ dilation=1,
660
+ groups=1,
661
+ bias=None,
662
+ padding_mode="zeros",
663
+ # BatchNorm3d args
664
+ # num_features: out_channels
665
+ eps=1e-05,
666
+ momentum=0.1,
667
+ # affine: True
668
+ # track_running_stats: True
669
+ # Args for this module
670
+ freeze_bn=False,
671
+ qconfig=None,
672
+ ):
673
+ kernel_size = _triple(kernel_size)
674
+ stride = _triple(stride)
675
+ padding = _triple(padding)
676
+ dilation = _triple(dilation)
677
+ _ConvBnNd.__init__(
678
+ self,
679
+ in_channels,
680
+ out_channels,
681
+ kernel_size,
682
+ stride,
683
+ padding,
684
+ dilation,
685
+ False,
686
+ _triple(0),
687
+ groups,
688
+ bias,
689
+ padding_mode,
690
+ eps,
691
+ momentum,
692
+ freeze_bn,
693
+ qconfig,
694
+ dim=3,
695
+ )
696
+
697
+ class ConvBnReLU3d(ConvBn3d):
698
+ r"""
699
+ A ConvBnReLU3d module is a module fused from Conv3d, BatchNorm3d and ReLU,
700
+ attached with FakeQuantize modules for weight,
701
+ used in quantization aware training.
702
+
703
+ We combined the interface of :class:`torch.nn.Conv3d` and
704
+ :class:`torch.nn.BatchNorm3d` and :class:`torch.nn.ReLU`.
705
+
706
+ Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
707
+ default.
708
+
709
+ Attributes:
710
+ weight_fake_quant: fake quant module for weight
711
+
712
+ """
713
+ _FLOAT_MODULE = nni.ConvBnReLU3d # type: ignore[assignment]
714
+ _FLOAT_CONV_MODULE = nn.Conv3d
715
+ _FLOAT_BN_MODULE = nn.BatchNorm3d
716
+ _FLOAT_RELU_MODULE = nn.ReLU # type: ignore[assignment]
717
+ # module class after fusing bn into conv
718
+ _FUSED_FLOAT_MODULE = nni.ConvReLU3d
719
+
720
+ def __init__(
721
+ self,
722
+ # Conv3d args
723
+ in_channels,
724
+ out_channels,
725
+ kernel_size,
726
+ stride=1,
727
+ padding=0,
728
+ dilation=1,
729
+ groups=1,
730
+ bias=None,
731
+ padding_mode="zeros",
732
+ # BatchNorm3d args
733
+ # num_features: out_channels
734
+ eps=1e-05,
735
+ momentum=0.1,
736
+ # affine: True
737
+ # track_running_stats: True
738
+ # Args for this module
739
+ freeze_bn=False,
740
+ qconfig=None,
741
+ ):
742
+ super().__init__(
743
+ in_channels,
744
+ out_channels,
745
+ kernel_size,
746
+ stride,
747
+ padding,
748
+ dilation,
749
+ groups,
750
+ bias,
751
+ padding_mode,
752
+ eps,
753
+ momentum,
754
+ freeze_bn,
755
+ qconfig,
756
+ )
757
+
758
+ def forward(self, input):
759
+ return F.relu(ConvBn3d._forward(self, input))
760
+
761
+ @classmethod
762
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
763
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
764
+
765
+ class ConvReLU3d(nnqat.Conv3d, nni._FusedModule):
766
+ r"""A ConvReLU3d module is a fused module of Conv3d and ReLU, attached with
767
+ FakeQuantize modules for weight for
768
+ quantization aware training.
769
+
770
+ We combined the interface of :class:`~torch.nn.Conv3d` and
771
+ :class:`~torch.nn.BatchNorm3d`.
772
+
773
+ Attributes:
774
+ weight_fake_quant: fake quant module for weight
775
+
776
+ """
777
+ _FLOAT_MODULE = nni.ConvReLU3d # type: ignore[assignment]
778
+ _FLOAT_CONV_MODULE = nn.Conv3d
779
+ _FLOAT_BN_MODULE: None = None
780
+ _FLOAT_RELU_MODULE = nn.ReLU
781
+
782
+ def __init__(
783
+ self,
784
+ in_channels,
785
+ out_channels,
786
+ kernel_size,
787
+ stride=1,
788
+ padding=0,
789
+ dilation=1,
790
+ groups=1,
791
+ bias=True,
792
+ padding_mode="zeros",
793
+ qconfig=None,
794
+ ):
795
+ super().__init__(
796
+ in_channels,
797
+ out_channels,
798
+ kernel_size,
799
+ stride=stride,
800
+ padding=padding,
801
+ dilation=dilation,
802
+ groups=groups,
803
+ bias=bias,
804
+ padding_mode=padding_mode,
805
+ qconfig=qconfig,
806
+ )
807
+ assert qconfig, "qconfig must be provided for QAT module"
808
+ self.qconfig = qconfig
809
+ self.weight_fake_quant = self.qconfig.weight()
810
+
811
+ def forward(self, input):
812
+ return F.relu(
813
+ self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
814
+ )
815
+
816
+ @classmethod
817
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
818
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
819
+
820
+ def update_bn_stats(mod):
821
+ if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}:
822
+ mod.update_bn_stats()
823
+
824
+ def freeze_bn_stats(mod):
825
+ if type(mod) in {ConvBnReLU1d, ConvBnReLU2d, ConvBnReLU3d, ConvBn1d, ConvBn2d, ConvBn3d}:
826
+ mod.freeze_bn_stats()
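A minimal end-to-end sketch (illustrative, not part of this commit) of how these QAT modules are normally swapped in by the eager-mode workflow. It assumes the standard torch.ao.quantization helpers (fuse_modules_qat, prepare_qat, get_default_qat_qconfig), whose exact names and signatures can vary across PyTorch releases; the model layout is illustrative.

    import torch
    import torch.nn as nn
    from torch.ao.quantization import get_default_qat_qconfig, prepare_qat, fuse_modules_qat

    model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU()).train()
    model.qconfig = get_default_qat_qconfig("fbgemm")
    model = fuse_modules_qat(model, [["0", "1", "2"]])  # -> intrinsic ConvBnReLU2d container
    model = prepare_qat(model)                          # -> the qat ConvBnReLU2d defined above
    out = model(torch.randn(2, 3, 32, 32))              # runs _forward_approximate followed by F.relu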
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/qat/modules/linear_fused.py ADDED
@@ -0,0 +1,172 @@
+ # mypy: allow-untyped-defs
+ import torch
+ import torch.nn as nn
+ import torch.ao.nn.intrinsic as nni
+ import torch.nn.functional as F
+ from torch.nn import init
+ from torch.nn.parameter import Parameter
+ from torch.nn.utils.fusion import fuse_linear_bn_weights
+
+ __all__ = [
+     "LinearBn1d",
+ ]
+
+ class LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):
+     r"""
+     A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached
+     with FakeQuantize modules for weight, used in quantization aware training.
+
+     We combined the interface of :class:`torch.nn.Linear` and
+     :class:`torch.nn.BatchNorm1d`.
+
+     Similar to :class:`torch.nn.Linear`, with FakeQuantize modules initialized
+     to default.
+
+     Attributes:
+         freeze_bn:
+         weight_fake_quant: fake quant module for weight
+
+     """
+     def __init__(self,
+                  # Linear args
+                  in_features, out_features, bias=True,
+                  # BatchNorm1d args
+                  # num_features: out_features
+                  eps=1e-05, momentum=0.1,
+                  # affine: True
+                  # track_running_stats: True
+                  # Args for this module
+                  freeze_bn=False,
+                  qconfig=None):
+         nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)
+         assert qconfig, 'qconfig must be provided for QAT module'
+         self.qconfig = qconfig
+         self.freeze_bn = freeze_bn if self.training else True
+         self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)
+         self.weight_fake_quant = self.qconfig.weight()
+         if bias:
+             self.bias = Parameter(torch.empty(out_features))
+         else:
+             self.register_parameter('bias', None)
+         self.reset_bn_parameters()
+
+         # this needs to be called after reset_bn_parameters,
+         # as they modify the same state
+         if self.training:
+             if freeze_bn:
+                 self.freeze_bn_stats()
+             else:
+                 self.update_bn_stats()
+         else:
+             self.freeze_bn_stats()
+
+     def reset_running_stats(self):
+         self.bn.reset_running_stats()
+
+     def reset_bn_parameters(self):
+         self.bn.reset_running_stats()
+         init.uniform_(self.bn.weight)
+         init.zeros_(self.bn.bias)
+
+     def reset_parameters(self):
+         super().reset_parameters()
+
+     def update_bn_stats(self):
+         self.freeze_bn = False
+         self.bn.training = True
+         return self
+
+     def freeze_bn_stats(self):
+         self.freeze_bn = True
+         self.bn.training = False
+         return self
+
+     def forward(self, input):
+         assert self.bn.running_var is not None
+
+         # Scale the linear weights by BN's running statistics to reduce
+         # weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18
+         # for motivation.
+         #
+         # Instead of
+         #
+         #   x1 = F.linear(x0, fq(w), b)
+         #   x2 = self.bn(x1)
+         #
+         # We have
+         #
+         #   # scale the weight by previous batch's running statistics
+         #   scale_factor = bn.w / bn.running_std_from_prev_batch
+         #   # do the linear transformation without bias
+         #   x1_scaled = F.linear(x0, fq(w * scale_factor), 0)
+         #   # reverse the scaling and add original bias
+         #   x1_orig = x1_scaled / scale_factor + b
+         #   x2 = self.bn(x1_orig)
+
+         running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
+         scale_factor = self.bn.weight / running_std
+         weight_shape = [1] * len(self.weight.shape)
+         weight_shape[0] = -1
+         bias_shape = [1] * len(self.weight.shape)
+         bias_shape[1] = -1
+         scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))
+         if self.bias is not None:
+             zero_bias = torch.zeros_like(self.bias)
+         else:
+             zero_bias = torch.zeros(self.out_features, device=scaled_weight.device)
+         linear_out = F.linear(input, scaled_weight, zero_bias)
+         linear_out_orig = linear_out / scale_factor.reshape(bias_shape)
+         if self.bias is not None:
+             linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape)
+         bn_out = self.bn(linear_out_orig)
+         return bn_out
+
+     def train(self, mode=True):
+         """
+         Batchnorm's training behavior is using the self.training flag. Prevent
+         changing it if BN is frozen. This makes sure that calling `model.train()`
+         on a model with a frozen BN will behave properly.
+         """
+         self.training = mode
+         if not self.freeze_bn:
+             for module in self.children():
+                 module.train(mode)
+         return self
+
+     @classmethod
+     def from_float(cls, mod, use_precomputed_fake_quant=False):
+         r"""Create a qat module from a float module or qparams_dict
+
+         Args: `mod' a float module, either produced by torch.ao.quantization
+              utilities or directly from user
+         """
+         assert type(mod) == nni.LinearBn1d, 'qat.' + cls.__name__ + \
+             '.from_float only works for ' + nni.LinearBn1d.__name__
+         assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
+         assert mod.qconfig, 'Input float module must have a valid config'
+         qconfig = mod.qconfig
+         linear, bn = mod[0], mod[1]
+         qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None,
+                            bn.eps, bn.momentum,
+                            False, qconfig)
+         qat_linearbn.weight = linear.weight
+         qat_linearbn.bias = linear.bias
+         qat_linearbn.bn.weight = bn.weight
+         qat_linearbn.bn.bias = bn.bias
+         qat_linearbn.bn.running_mean = bn.running_mean
+         qat_linearbn.bn.running_var = bn.running_var
+         qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked
+         return qat_linearbn
+
+     def to_float(self):
+         linear = torch.nn.Linear(self.in_features, self.out_features)
+         assert self.bn.running_var is not None and self.bn.running_mean is not None
+         linear.weight, linear.bias = fuse_linear_bn_weights(
+             self.weight,
+             self.bias,
+             self.bn.running_mean,
+             self.bn.running_var,
+             self.bn.eps,
+             self.bn.weight,
+             self.bn.bias)
+         return linear
@@ -0,0 +1,49 @@
+# mypy: allow-untyped-defs
+import torch
+import torch.ao.nn.qat as nnqat
+import torch.ao.nn.intrinsic as nni
+import torch.nn.functional as F
+
+class LinearReLU(nnqat.Linear, nni._FusedModule):
+    r"""
+    A LinearReLU module fused from Linear and ReLU modules, attached with
+    FakeQuantize modules for weight, used in
+    quantization aware training.
+
+    We adopt the same interface as :class:`torch.nn.Linear`.
+
+    Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
+    default.
+
+    Attributes:
+        weight: fake quant module for weight
+
+    Examples::
+
+        >>> # xdoctest: +SKIP
+        >>> m = nn.qat.LinearReLU(20, 30)
+        >>> input = torch.randn(128, 20)
+        >>> output = m(input)
+        >>> print(output.size())
+        torch.Size([128, 30])
+    """
+    _FLOAT_MODULE = nni.LinearReLU  # type: ignore[assignment]
+
+    def __init__(self, in_features, out_features, bias=True,
+                 qconfig=None):
+        super().__init__(in_features, out_features, bias, qconfig)
+
+    def forward(self, input):
+        return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        return super().from_float(mod, use_precomputed_fake_quant)
+
+    def to_float(self):
+        linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)
+        linear.weight = torch.nn.Parameter(self.weight.detach())
+        if self.bias is not None:
+            linear.bias = torch.nn.Parameter(self.bias.detach())
+        relu = torch.nn.ReLU()
+        return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
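
For context, a rough sketch of using this QAT module directly; any QAT qconfig works, the fbgemm default here is just an assumption:

import torch
import torch.ao.nn.intrinsic.qat as nniqat
from torch.ao.quantization import get_default_qat_qconfig

# The weight passes through its FakeQuantize observer, then ReLU is applied.
m = nniqat.LinearReLU(20, 30, qconfig=get_default_qat_qconfig("fbgemm"))
y = m(torch.randn(128, 20))   # equivalent to relu(F.linear(x, fake_quant(W), b))
print(y.size())               # torch.Size([128, 30])
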
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__init__.py ADDED
@@ -0,0 +1,14 @@
+from .modules import *  # noqa: F403
+
+__all__ = [
+    'BNReLU2d',
+    'BNReLU3d',
+    'ConvReLU1d',
+    'ConvReLU2d',
+    'ConvReLU3d',
+    'LinearReLU',
+    'LinearLeakyReLU',
+    'LinearTanh',
+    'ConvAdd2d',
+    'ConvAddReLU2d',
+]
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (351 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+from .modules import *  # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (215 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,6 @@
+import torch
+from .linear_relu import LinearReLU
+
+__all__ = [
+    'LinearReLU',
+]
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (291 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (2.42 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (566 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc ADDED
Binary file (3.1 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_add.cpython-310.pyc ADDED
Binary file (3.37 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc ADDED
Binary file (5.76 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/bn_relu.py ADDED
@@ -0,0 +1,83 @@
+# mypy: allow-untyped-defs
+
+import torch
+import torch.ao.nn.intrinsic
+import torch.ao.nn.intrinsic.qat
+import torch.ao.nn.quantized as nnq
+
+__all__ = [
+    "BNReLU2d",
+    "BNReLU3d"
+]
+
+class BNReLU2d(nnq.BatchNorm2d):
+    r"""
+    A BNReLU2d module is a fused module of BatchNorm2d and ReLU
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm2d`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.BatchNorm2d
+
+    """
+    _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d
+
+    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
+        super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)
+
+    def forward(self, input):
+        # Temporarily using len(shape) instead of ndim due to JIT issue
+        # https://github.com/pytorch/pytorch/issues/23890
+        if len(input.shape) != 4:
+            raise ValueError("Input shape must be `(N, C, H, W)`!")
+        return torch.ops.quantized.batch_norm2d_relu(
+            input, self.weight, self.bias, self.running_mean,
+            self.running_var, self.eps, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedBNReLU2d'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        # TODO: Add qat support for BNReLU2d
+        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, bn_relu, output_scale, output_zero_point):
+        return super().from_reference(bn_relu[0], output_scale, output_zero_point)
+
+class BNReLU3d(nnq.BatchNorm3d):
+    r"""
+    A BNReLU3d module is a fused module of BatchNorm3d and ReLU
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm3d`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.BatchNorm3d
+
+    """
+    _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d
+
+    def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
+        super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)
+
+    def forward(self, input):
+        # Temporarily using len(shape) instead of ndim due to JIT issue
+        # https://github.com/pytorch/pytorch/issues/23890
+        if len(input.shape) != 5:
+            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
+        return torch.ops.quantized.batch_norm3d_relu(
+            input, self.weight, self.bias, self.running_mean,
+            self.running_var, self.eps, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedBNReLU3d'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        # TODO: Add qat support for BNReLU3d
+        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, bn_relu, output_scale, output_zero_point):
+        return super().from_reference(bn_relu[0], output_scale, output_zero_point)
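
These quantized BNReLU modules are normally produced by the eager-mode post-training static quantization flow rather than constructed by hand. A minimal sketch under that assumption (the model and its layer names are hypothetical):

import torch
import torch.nn as nn
import torch.ao.quantization as tq

class TinyBNReLU(nn.Module):  # hypothetical example model
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()
        self.bn = nn.BatchNorm2d(3)
        self.relu = nn.ReLU()
        self.dequant = tq.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.relu(self.bn(self.quant(x))))

m = TinyBNReLU().eval()
m.qconfig = tq.get_default_qconfig("fbgemm")
m = tq.fuse_modules(m, [["bn", "relu"]])   # -> torch.ao.nn.intrinsic.BNReLU2d
m = tq.prepare(m)
m(torch.randn(4, 3, 8, 8))                 # calibration pass
m = tq.convert(m)                          # bn becomes QuantizedBNReLU2d
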
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_relu.py ADDED
@@ -0,0 +1,176 @@
+# mypy: allow-untyped-defs
+
+import torch
+import torch.ao.nn.intrinsic
+import torch.ao.nn.intrinsic.qat
+import torch.nn.functional as F
+import torch.ao.nn.quantized as nnq
+
+from torch.nn.utils import fuse_conv_bn_weights
+
+__all__ = [
+    "ConvReLU1d",
+    "ConvReLU2d",
+    "ConvReLU3d",
+]
+
+_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding
+
+# TODO: factor out the common parts to ConvNd
+class ConvReLU1d(nnq.Conv1d):
+    r"""
+    A ConvReLU1d module is a fused module of Conv1d and ReLU
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv1d`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.Conv1d
+
+    """
+    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1, bias=True,
+                 padding_mode='zeros', device=None, dtype=None):
+        super().__init__(
+            in_channels, out_channels, kernel_size, stride=stride,
+            padding=padding, dilation=dilation, groups=groups, bias=bias,
+            padding_mode=padding_mode, device=device, dtype=dtype)
+
+    def forward(self, input):
+        # Temporarily using len(shape) instead of ndim due to JIT issue
+        # https://github.com/pytorch/pytorch/issues/23890
+        if len(input.shape) != 3:
+            raise ValueError("Input shape must be `(N, C, L)`!")
+        if self.padding_mode != 'zeros':
+            # Padding in Conv1d is stored as (p, p), need to get (p,)
+            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
+            input = F.pad(input, _reversed_padding_repeated_twice,
+                          mode=self.padding_mode)
+        return torch.ops.quantized.conv1d_relu(
+            input, self._packed_params, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedConvReLU1d'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU1d:
+            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
+            mod.weight, mod.bias = fuse_conv_bn_weights(
+                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
+                mod.bn.eps, mod.bn.weight, mod.bn.bias)
+        return super().from_float(mod, use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
+        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU1d, \
+            "BatchNorm1d should be fused into Conv1d before converting to reference module"
+        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
+
+class ConvReLU2d(nnq.Conv2d):
+    r"""
+    A ConvReLU2d module is a fused module of Conv2d and ReLU
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.Conv2d
+
+    """
+    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1, bias=True,
+                 padding_mode='zeros', device=None, dtype=None):
+        super().__init__(
+            in_channels, out_channels, kernel_size, stride=stride,
+            padding=padding, dilation=dilation, groups=groups, bias=bias,
+            padding_mode=padding_mode, device=device, dtype=dtype)
+
+    def forward(self, input):
+        # Temporarily using len(shape) instead of ndim due to JIT issue
+        # https://github.com/pytorch/pytorch/issues/23890
+        if len(input.shape) != 4:
+            raise ValueError("Input shape must be `(N, C, H, W)`!")
+        if self.padding_mode != 'zeros':
+            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
+            input = F.pad(input, _reversed_padding_repeated_twice,
+                          mode=self.padding_mode)
+        return torch.ops.quantized.conv2d_relu(
+            input, self._packed_params, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedConvReLU2d'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU2d:
+            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
+            mod.weight, mod.bias = fuse_conv_bn_weights(
+                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
+                mod.bn.eps, mod.bn.weight, mod.bn.bias)
+        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
+        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU2d, \
+            "BatchNorm2d should be fused into Conv2d before converting to reference module"
+        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
+
+
+class ConvReLU3d(nnq.Conv3d):
+    r"""
+    A ConvReLU3d module is a fused module of Conv3d and ReLU
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Conv3d`.
+
+    Attributes: Same as torch.ao.nn.quantized.Conv3d
+
+    """
+    _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvReLU3d  # type: ignore[assignment]
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                 padding=0, dilation=1, groups=1, bias=True,
+                 padding_mode='zeros', device=None, dtype=None):
+        assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
+        super().__init__(
+            in_channels, out_channels, kernel_size, stride=stride,
+            padding=padding, dilation=dilation, groups=groups, bias=bias,
+            padding_mode=padding_mode, device=device, dtype=dtype)
+
+    def forward(self, input):
+        # Temporarily using len(shape) instead of ndim due to JIT issue
+        # https://github.com/pytorch/pytorch/issues/23890
+        if len(input.shape) != 5:
+            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
+        if self.padding_mode != 'zeros':
+            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
+            input = F.pad(input, _reversed_padding_repeated_twice,
+                          mode=self.padding_mode)
+        return torch.ops.quantized.conv3d_relu(
+            input, self._packed_params, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedConvReLU3d'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        if type(mod) == torch.ao.nn.intrinsic.qat.ConvBnReLU3d:
+            assert mod.bn.running_var is not None and mod.bn.running_mean is not None
+            mod.weight, mod.bias = fuse_conv_bn_weights(
+                mod.weight,
+                mod.bias,
+                mod.bn.running_mean,
+                mod.bn.running_var,
+                mod.bn.eps,
+                mod.bn.weight,
+                mod.bn.bias,
+            )
+        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, ref_qconv, output_scale, output_zero_point):
+        assert type(ref_qconv) != torch.ao.nn.intrinsic.ConvBnReLU3d, \
+            "BatchNorm3d should be fused into Conv3d before converting to reference module"
+        return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
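
For illustration only, a direct construction of the fused quantized conv module; in practice these objects are created by torch.ao.quantization.convert from a fused, calibrated float model, and the scale/zero_point values below are arbitrary assumptions:

import torch
import torch.ao.nn.intrinsic.quantized as nniq

m = nniq.ConvReLU2d(3, 8, kernel_size=3)
m.scale, m.zero_point = 0.1, 0   # output qparams, normally set by convert()
xq = torch.quantize_per_tensor(torch.randn(1, 3, 16, 16), scale=0.1, zero_point=0, dtype=torch.quint8)
yq = m(xq)                       # conv2d + relu executed as a single quantized kernel
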
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/linear_relu.py ADDED
@@ -0,0 +1,178 @@
+# mypy: allow-untyped-defs
+import torch
+import torch.ao.nn.quantized as nnq
+import torch.ao.nn.intrinsic as nni
+from torch.ao.nn.quantized.modules.utils import _quantize_weight
+
+__all__ = [
+    "LinearReLU",
+    "LinearLeakyReLU",
+    "LinearTanh",
+]
+
+class LinearReLU(nnq.Linear):
+    r"""
+    A LinearReLU module fused from Linear and ReLU modules
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.Linear
+
+    Examples::
+
+        >>> # xdoctest: +SKIP
+        >>> m = nn.intrinsic.LinearReLU(20, 30)
+        >>> input = torch.randn(128, 20)
+        >>> output = m(input)
+        >>> print(output.size())
+        torch.Size([128, 30])
+    """
+    _FLOAT_MODULE = nni.LinearReLU  # type: ignore[assignment]
+
+    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
+        super().__init__(in_features, out_features, bias, dtype)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return torch.ops.quantized.linear_relu(
+            x, self._packed_params._packed_params, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedLinearReLU'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        return super().from_float(mod, use_precomputed_fake_quant)
+
+    @classmethod
+    def from_reference(cls, ref_linear_relu, output_scale, output_zero_point):
+        return super().from_reference(ref_linear_relu[0], output_scale, output_zero_point)
+
+class LinearLeakyReLU(nnq.Linear):
+    r"""
+    For onednn backend only
+    A LinearLeakyReLU module fused from Linear and LeakyReLU modules
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
+    Attributes:
+        Same as torch.ao.nn.quantized.Linear
+        + negative_slope
+    Examples::
+        >>> # xdoctest: +SKIP
+        >>> m = nn.intrinsic.LinearLeakyReLU(20, 30, 0.01)
+        >>> input = torch.randn(128, 20)
+        >>> output = m(input)
+        >>> print(output.size())
+        torch.Size([128, 30])
+    """
+    _FLOAT_MODULE = nni.LinearLeakyReLU  # type: ignore[assignment]
+
+    def __init__(self, in_features, out_features, negative_slope, bias=True, dtype=torch.qint8):
+        super().__init__(in_features, out_features, bias, dtype)
+        self.negative_slope = negative_slope
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return torch.ops.quantized.linear_leaky_relu(
+            x, self._packed_params._packed_params, self.scale, self.zero_point, self.negative_slope)
+
+    def _get_name(self):
+        return 'QuantizedLinearLeakyReLU'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        assert type(mod) == nni.LinearLeakyReLU, 'Input float module should be LinearLeakyReLU'
+        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
+        activation_post_process = mod.activation_post_process
+        leaky_relu = mod[1]
+        mod = mod[0]
+        weight_post_process = mod.qconfig.weight()
+        weight_post_process(mod.weight)
+        dtype = weight_post_process.dtype
+        act_scale, act_zp = activation_post_process.calculate_qparams()  # type: ignore[union-attr,operator]
+        assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
+        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
+        qlinear_leaky_relu = cls(
+            mod.in_features,
+            mod.out_features,
+            leaky_relu.negative_slope,
+            dtype=dtype)
+        qlinear_leaky_relu.set_weight_bias(qweight, mod.bias)
+        qlinear_leaky_relu.scale = float(act_scale)
+        qlinear_leaky_relu.zero_point = int(act_zp)
+        return qlinear_leaky_relu
+
+    @classmethod
+    def from_reference(cls, ref_mod, output_scale, output_zero_point):
+        linear = ref_mod[0]
+        leaky_relu = ref_mod[1]
+        qlinear_leaky_relu = cls(
+            linear.in_features,
+            linear.out_features,
+            leaky_relu.negative_slope)
+        qweight = linear.get_quantized_weight()
+        qlinear_leaky_relu.set_weight_bias(qweight, linear.bias)
+        qlinear_leaky_relu.scale = float(output_scale)
+        qlinear_leaky_relu.zero_point = int(output_zero_point)
+        return qlinear_leaky_relu
+
+class LinearTanh(nnq.Linear):
+    r"""
+    A LinearTanh module fused from Linear and Tanh modules
+
+    We adopt the same interface as :class:`torch.ao.nn.quantized.Linear`.
+
+    Attributes:
+        Same as torch.ao.nn.quantized.Linear
+
+    Examples::
+
+        >>> # xdoctest: +SKIP
+        >>> m = nn.intrinsic.LinearTanh(20, 30)
+        >>> input = torch.randn(128, 20)
+        >>> output = m(input)
+        >>> print(output.size())
+        torch.Size([128, 30])
+    """
+    _FLOAT_MODULE = nni.LinearTanh  # type: ignore[assignment]
+
+    def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
+        super().__init__(in_features, out_features, bias, dtype)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return torch.ops.quantized.linear_tanh(
+            x, self._packed_params._packed_params, self.scale, self.zero_point)
+
+    def _get_name(self):
+        return 'QuantizedLinearTanh'
+
+    @classmethod
+    def from_float(cls, mod, use_precomputed_fake_quant=False):
+        assert type(mod) == nni.LinearTanh, 'Input float module should be LinearTanh'
+        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
+        activation_post_process = mod.activation_post_process
+        mod = mod[0]
+        weight_post_process = mod.qconfig.weight()
+        weight_post_process(mod.weight)
+        dtype = weight_post_process.dtype
+        act_scale, act_zp = activation_post_process.calculate_qparams()  # type: ignore[union-attr,operator]
+        assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
+        qweight = _quantize_weight(mod.weight.float(), weight_post_process)
+        qlinear_tanh = cls(
+            mod.in_features,
+            mod.out_features,
+            dtype=dtype)
+        qlinear_tanh.set_weight_bias(qweight, mod.bias)
+        qlinear_tanh.scale = float(act_scale)
+        qlinear_tanh.zero_point = int(act_zp)
+        return qlinear_tanh
+
+    @classmethod
+    def from_reference(cls, ref_mod, output_scale, output_zero_point):
+        linear = ref_mod[0]
+        qlinear_tanh = cls(
+            linear.in_features,
+            linear.out_features)
+        qweight = linear.get_quantized_weight()
+        qlinear_tanh.set_weight_bias(qweight, linear.bias)
+        qlinear_tanh.scale = float(output_scale)
+        qlinear_tanh.zero_point = int(output_zero_point)
+        return qlinear_tanh
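
As with the conv variants above, these fused quantized linear modules normally come out of the convert() step. A brief sketch assuming the eager-mode static quantization flow and a hypothetical two-layer model:

import torch
import torch.nn as nn
import torch.ao.quantization as tq

class TinyMLP(nn.Module):  # hypothetical example model
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()
        self.fc = nn.Linear(20, 30)
        self.relu = nn.ReLU()
        self.dequant = tq.DeQuantStub()

    def forward(self, x):
        return self.dequant(self.relu(self.fc(self.quant(x))))

m = TinyMLP().eval()
m.qconfig = tq.get_default_qconfig("fbgemm")
m = tq.fuse_modules(m, [["fc", "relu"]])   # -> torch.ao.nn.intrinsic.LinearReLU
m = tq.prepare(m)
m(torch.randn(8, 20))                      # calibration
m = tq.convert(m)                          # fc becomes QuantizedLinearReLU
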
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/__init__.py ADDED
@@ -0,0 +1 @@
+from .modules import *  # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+from .modules import *  # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .linear import Linear
+
+__all__ = ["Linear"]
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (242 Bytes).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (1.27 kB).
 
parrot/lib/python3.10/site-packages/torch/ao/nn/qat/dynamic/modules/linear.py ADDED
@@ -0,0 +1,26 @@
+# mypy: allow-untyped-defs
+import torch
+
+__all__ = ["Linear"]
+
+class Linear(torch.ao.nn.qat.Linear):
+    r"""
+    A linear module attached with FakeQuantize modules for weight,
+    used for dynamic quantization aware training.
+
+    We adopt the same interface as `torch.nn.Linear`, please see
+    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
+    for documentation.
+
+    Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
+    default.
+    """
+
+    def __init__(self, in_features, out_features, bias=True,
+                 qconfig=None, device=None, dtype=None) -> None:
+        super().__init__(in_features, out_features, bias, qconfig, device, dtype)
+        if not torch.ao.quantization.qconfig._activation_is_memoryless(qconfig):
+            raise ValueError(
+                "Dynamic QAT requires a memoryless observer. " +
+                "This means a MovingAverage observer with averaging constant equal to 1"
+            )
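
A small sketch of the memoryless-observer requirement above; it assumes default_dynamic_qat_qconfig is available under torch.ao.quantization.qconfig in this build, and the alias name is just for the example:

import torch
from torch.ao.nn.qat.dynamic import Linear as DynamicQATLinear
from torch.ao.quantization.qconfig import default_dynamic_qat_qconfig

# The activation observer must be memoryless (averaging constant of 1),
# otherwise the constructor raises the ValueError shown above.
m = DynamicQATLinear(16, 8, qconfig=default_dynamic_qat_qconfig)
y = m(torch.randn(4, 16))
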