ZTWHHH commited on
Commit
c6921fd
·
verified ·
1 Parent(s): a7c37b8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. llava_next/share/terminfo/m/mach-gnu-color +0 -0
  3. llava_next/share/terminfo/m/mgr-sun +0 -0
  4. llava_next/share/terminfo/m/microterm +0 -0
  5. llava_next/share/terminfo/m/minix +0 -0
  6. llava_next/share/terminfo/m/minix-1.5 +0 -0
  7. llava_next/share/terminfo/m/mintty-direct +0 -0
  8. llava_next/share/terminfo/m/mlterm-256color +0 -0
  9. llava_next/share/terminfo/m/mosh +0 -0
  10. llava_next/share/terminfo/m/ms-vt-utf8 +0 -0
  11. llava_next/share/terminfo/m/mskermit22714 +0 -0
  12. llava_next/share/terminfo/m/mterm +0 -0
  13. parrot/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py +56 -0
  15. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py +17 -0
  16. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py +94 -0
  18. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py +1 -0
  19. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py +9 -0
  21. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py +473 -0
  25. parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py +412 -0
  26. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py +1 -0
  28. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py +19 -0
  30. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py +1101 -0
  34. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__init__.py +18 -0
  41. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__init__.py +21 -0
  43. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py +319 -0
  44. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py +615 -0
  45. parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/sparse.py +95 -0
  46. parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py +1 -0
  47. parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py +10 -0
  49. parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -877,3 +877,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__
877
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
878
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
879
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_array_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
877
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
878
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
879
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_array_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
880
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
881
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
882
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
883
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/share/terminfo/m/mach-gnu-color ADDED
Binary file (1.34 kB). View file
 
llava_next/share/terminfo/m/mgr-sun ADDED
Binary file (900 Bytes). View file
 
llava_next/share/terminfo/m/microterm ADDED
Binary file (473 Bytes). View file
 
llava_next/share/terminfo/m/minix ADDED
Binary file (1.46 kB). View file
 
llava_next/share/terminfo/m/minix-1.5 ADDED
Binary file (607 Bytes). View file
 
llava_next/share/terminfo/m/mintty-direct ADDED
Binary file (3.91 kB). View file
 
llava_next/share/terminfo/m/mlterm-256color ADDED
Binary file (3.43 kB). View file
 
llava_next/share/terminfo/m/mosh ADDED
Binary file (3.41 kB). View file
 
llava_next/share/terminfo/m/ms-vt-utf8 ADDED
Binary file (1.65 kB). View file
 
llava_next/share/terminfo/m/mskermit22714 ADDED
Binary file (603 Bytes). View file
 
llava_next/share/terminfo/m/mterm ADDED
Binary file (393 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (486 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.quantized.dynamic as nnqd
4
+ import torch.ao.nn.intrinsic as nni
5
+
6
+ __all__ = [
7
+ "LinearReLU"
8
+ ]
9
+
10
+ class LinearReLU(nnqd.Linear):
11
+ r"""
12
+ A LinearReLU module fused from Linear and ReLU modules that can be used
13
+ for dynamic quantization.
14
+ Supports both, FP16 and INT8 quantization.
15
+
16
+ We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.
17
+
18
+ Attributes:
19
+ Same as torch.ao.nn.quantized.dynamic.Linear
20
+
21
+ Examples::
22
+
23
+ >>> # xdoctest: +SKIP
24
+ >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
25
+ >>> input = torch.randn(128, 20)
26
+ >>> output = m(input)
27
+ >>> print(output.size())
28
+ torch.Size([128, 30])
29
+ """
30
+ _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
31
+
32
+ def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
33
+ super().__init__(in_features, out_features, bias, dtype)
34
+
35
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
36
+ if self._packed_params.dtype == torch.qint8:
37
+ # TODO check if we should set reduce_rage = True by default here
38
+ Y = torch.ops.quantized.linear_relu_dynamic(
39
+ x, self._packed_params._packed_params, reduce_range=True)
40
+ elif self._packed_params.dtype == torch.float16:
41
+ Y = torch.ops.quantized.linear_relu_dynamic_fp16(
42
+ x, self._packed_params._packed_params)
43
+ else:
44
+ raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!')
45
+ return Y.to(x.dtype)
46
+
47
+ def _get_name(self):
48
+ return 'DynamicQuantizedLinearReLU'
49
+
50
+ @classmethod
51
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
52
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
53
+
54
+ @classmethod
55
+ def from_reference(cls, ref_qlinear_relu):
56
+ return super().from_reference(ref_qlinear_relu[0])
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .linear_relu import LinearReLU, LinearLeakyReLU, LinearTanh
2
+ from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
3
+ from .bn_relu import BNReLU2d, BNReLU3d
4
+ from .conv_add import ConvAdd2d, ConvAddReLU2d
5
+
6
+ __all__ = [
7
+ 'LinearReLU',
8
+ 'ConvReLU1d',
9
+ 'ConvReLU2d',
10
+ 'ConvReLU3d',
11
+ 'BNReLU2d',
12
+ 'BNReLU3d',
13
+ 'LinearLeakyReLU',
14
+ 'LinearTanh',
15
+ 'ConvAdd2d',
16
+ 'ConvAddReLU2d',
17
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.ao.nn.intrinsic
4
+ import torch.ao.nn.intrinsic.qat
5
+ import torch.nn.functional as F
6
+ import torch.ao.nn.quantized as nnq
7
+
8
+ _reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding
9
+
10
+ class ConvAdd2d(nnq.Conv2d):
11
+ r"""
12
+ A ConvAdd2d module is a fused module of Conv2d and Add
13
+
14
+ We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
15
+
16
+ Attributes:
17
+ Same as torch.ao.nn.quantized.Conv2d
18
+
19
+ """
20
+ _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAdd2d # type: ignore[assignment]
21
+
22
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
23
+ padding=0, dilation=1, groups=1, bias=True,
24
+ padding_mode='zeros', device=None, dtype=None):
25
+ super().__init__(
26
+ in_channels, out_channels, kernel_size, stride=stride,
27
+ padding=padding, dilation=dilation, groups=groups, bias=bias,
28
+ padding_mode=padding_mode, device=device, dtype=dtype)
29
+
30
+ def forward(self, input, extra_input):
31
+ # Temporarily using len(shape) instead of ndim due to JIT issue
32
+ # https://github.com/pytorch/pytorch/issues/23890
33
+ if len(input.shape) != 4:
34
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
35
+ if self.padding_mode != 'zeros':
36
+ _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
37
+ input = F.pad(input, _reversed_padding_repeated_twice,
38
+ mode=self.padding_mode)
39
+ return torch.ops.quantized.conv2d_add(
40
+ input, extra_input, self._packed_params, self.scale, self.zero_point)
41
+
42
+ def _get_name(self):
43
+ return 'QuantizedConvAdd2d'
44
+
45
+ @classmethod
46
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
47
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
48
+
49
+ @classmethod
50
+ def from_reference(cls, ref_qconv, output_scale, output_zero_point):
51
+ return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
52
+
53
+ class ConvAddReLU2d(nnq.Conv2d):
54
+ r"""
55
+ A ConvAddReLU2d module is a fused module of Conv2d, Add and Relu
56
+
57
+ We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
58
+
59
+ Attributes:
60
+ Same as torch.ao.nn.quantized.Conv2d
61
+
62
+ """
63
+ _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAddReLU2d # type: ignore[assignment]
64
+
65
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
66
+ padding=0, dilation=1, groups=1, bias=True,
67
+ padding_mode='zeros', device=None, dtype=None):
68
+ super().__init__(
69
+ in_channels, out_channels, kernel_size, stride=stride,
70
+ padding=padding, dilation=dilation, groups=groups, bias=bias,
71
+ padding_mode=padding_mode, device=device, dtype=dtype)
72
+
73
+ def forward(self, input, extra_input):
74
+ # Temporarily using len(shape) instead of ndim due to JIT issue
75
+ # https://github.com/pytorch/pytorch/issues/23890
76
+ if len(input.shape) != 4:
77
+ raise ValueError("Input shape must be `(N, C, H, W)`!")
78
+ if self.padding_mode != 'zeros':
79
+ _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
80
+ input = F.pad(input, _reversed_padding_repeated_twice,
81
+ mode=self.padding_mode)
82
+ return torch.ops.quantized.conv2d_add_relu(
83
+ input, extra_input, self._packed_params, self.scale, self.zero_point)
84
+
85
+ def _get_name(self):
86
+ return 'QuantizedConvAddReLU2d'
87
+
88
+ @classmethod
89
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
90
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
91
+
92
+ @classmethod
93
+ def from_reference(cls, ref_qconv, output_scale, output_zero_point):
94
+ return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from .activation import MultiheadAttention
2
+ from .rnn import LSTM
3
+ from .rnn import LSTMCell
4
+
5
+ __all__ = [
6
+ 'LSTM',
7
+ 'LSTMCell',
8
+ 'MultiheadAttention',
9
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (335 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.jit # this is needed to avoid a circular import
4
+ from torch import nn
5
+ import torch.nn.functional as nnF
6
+
7
+ from torch import Tensor
8
+ from typing import Optional, Tuple
9
+
10
+ import warnings
11
+
12
+ __all__ = [
13
+ "MultiheadAttention"
14
+ ]
15
+
16
+ class MultiheadAttention(nn.MultiheadAttention):
17
+ _FLOAT_MODULE = nn.MultiheadAttention
18
+
19
+ r"""Quantizable implementation of the MultiheadAttention.
20
+
21
+ Note::
22
+ Please, refer to :class:`~torch.nn.MultiheadAttention` for more
23
+ information
24
+
25
+ Allows the model to jointly attend to information from different
26
+ representation subspaces.
27
+ See reference: Attention Is All You Need
28
+
29
+ The original MHA module is not quantizable.
30
+ This reimplements it by explicitly instantiating the linear layers.
31
+
32
+ .. math::
33
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
34
+ \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
35
+
36
+ Args:
37
+ embed_dim: total dimension of the model.
38
+ num_heads: parallel attention heads.
39
+ dropout: a Dropout layer on attn_output_weights. Default: 0.0.
40
+ bias: add bias as module parameter. Default: True.
41
+ add_bias_kv: add bias to the key and value sequences at dim=0.
42
+ add_zero_attn: add a new batch of zeros to the key and
43
+ value sequences at dim=1.
44
+ kdim: total number of features in key. Default: None.
45
+ vdim: total number of features in value. Default: None.
46
+ batch_first: If ``True``, then the input and output tensors are provided
47
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
48
+
49
+ Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
50
+ to :attr:`embed_dim` such that query, key, and value have the same
51
+ number of features.
52
+
53
+ Examples::
54
+
55
+ >>> import torch.ao.nn.quantizable as nnqa
56
+ >>> multihead_attn = nnqa.MultiheadAttention(embed_dim, num_heads)
57
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
58
+
59
+ Note::
60
+ Please, follow the quantization flow to convert the quantizable MHA.
61
+ """
62
+ __constants__ = ['batch_first']
63
+
64
+ def __init__(self, embed_dim: int, num_heads: int,
65
+ dropout: float = 0., bias: bool = True,
66
+ add_bias_kv: bool = False, add_zero_attn: bool = False,
67
+ kdim: Optional[int] = None, vdim: Optional[int] = None, batch_first: bool = False,
68
+ device=None, dtype=None) -> None:
69
+ factory_kwargs = {'device': device, 'dtype': dtype}
70
+ super().__init__(embed_dim, num_heads, dropout,
71
+ bias, add_bias_kv,
72
+ add_zero_attn, kdim, vdim, batch_first,
73
+ **factory_kwargs)
74
+ self.linear_Q = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs)
75
+ self.linear_K = nn.Linear(self.kdim, self.embed_dim, bias=bias, **factory_kwargs)
76
+ self.linear_V = nn.Linear(self.vdim, self.embed_dim, bias=bias, **factory_kwargs)
77
+ # for the type: ignore, see https://github.com/pytorch/pytorch/issues/58969
78
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ignore[assignment]
79
+
80
+ # Functionals
81
+ self.q_scaling_product = torch.ao.nn.quantized.FloatFunctional()
82
+ # note: importing torch.ao.nn.quantized at top creates a circular import
83
+
84
+ # Quant/Dequant
85
+ self.quant_attn_output = torch.ao.quantization.QuantStub()
86
+ self.quant_attn_output_weights = torch.ao.quantization.QuantStub()
87
+ self.dequant_q = torch.ao.quantization.DeQuantStub()
88
+ self.dequant_k = torch.ao.quantization.DeQuantStub()
89
+ self.dequant_v = torch.ao.quantization.DeQuantStub()
90
+
91
+ def _get_name(self):
92
+ return 'QuantizableMultiheadAttention'
93
+
94
+ @classmethod
95
+ def from_float(cls, other):
96
+ assert type(other) == cls._FLOAT_MODULE
97
+ assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
98
+ # Setting the dropout to 0.0!
99
+ observed = cls(other.embed_dim, other.num_heads, other.dropout,
100
+ (other.in_proj_bias is not None),
101
+ (other.bias_k is not None),
102
+ other.add_zero_attn, other.kdim, other.vdim,
103
+ other.batch_first)
104
+ observed.bias_k = other.bias_k
105
+ observed.bias_v = other.bias_v
106
+ observed.qconfig = other.qconfig
107
+
108
+ # Set the linear weights
109
+ # for the type: ignores, see https://github.com/pytorch/pytorch/issues/58969
110
+ observed.out_proj.weight = other.out_proj.weight # type: ignore[has-type]
111
+ observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type]
112
+ if other._qkv_same_embed_dim:
113
+ # Use separate params
114
+ bias = other.in_proj_bias
115
+ _start = 0
116
+ _end = _start + other.embed_dim
117
+ weight = other.in_proj_weight[_start:_end, :]
118
+ if bias is not None:
119
+ bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
120
+ observed.linear_Q.weight = torch.nn.Parameter(weight,
121
+ weight.requires_grad)
122
+ observed.linear_Q.bias = bias
123
+
124
+ bias = other.in_proj_bias
125
+ _start = _end
126
+ _end = _start + other.embed_dim
127
+ weight = other.in_proj_weight[_start:_end, :]
128
+ if bias is not None:
129
+ bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad)
130
+ observed.linear_K.weight = torch.nn.Parameter(weight,
131
+ weight.requires_grad)
132
+ observed.linear_K.bias = bias
133
+
134
+ bias = other.in_proj_bias
135
+ _start = _end
136
+ weight = other.in_proj_weight[_start:, :]
137
+ if bias is not None:
138
+ bias = torch.nn.Parameter(bias[_start:], bias.requires_grad)
139
+ observed.linear_V.weight = torch.nn.Parameter(weight,
140
+ weight.requires_grad)
141
+ observed.linear_V.bias = bias
142
+ else:
143
+ observed.linear_Q.weight = nn.Parameter(other.q_proj_weight)
144
+ observed.linear_K.weight = nn.Parameter(other.k_proj_weight)
145
+ observed.linear_V.weight = nn.Parameter(other.v_proj_weight)
146
+ if other.in_proj_bias is None:
147
+ observed.linear_Q.bias = None # type: ignore[assignment]
148
+ observed.linear_K.bias = None # type: ignore[assignment]
149
+ observed.linear_V.bias = None # type: ignore[assignment]
150
+ else:
151
+ observed.linear_Q.bias = nn.Parameter(other.in_proj_bias[0:other.embed_dim])
152
+ observed.linear_K.bias = nn.Parameter(other.in_proj_bias[other.embed_dim:(other.embed_dim * 2)])
153
+ observed.linear_V.bias = nn.Parameter(other.in_proj_bias[(other.embed_dim * 2):])
154
+ observed.eval()
155
+ # Explicit prepare
156
+ observed = torch.ao.quantization.prepare(observed, inplace=True)
157
+ return observed
158
+
159
+ @torch.jit.unused
160
+ def dequantize(self):
161
+ r"""Utility to convert the quantized MHA back to float.
162
+
163
+ The motivation for this is that it is not trivial to conver the weights
164
+ from the format that is used in the quantized version back to the
165
+ float.
166
+ """
167
+ fp = self._FLOAT_MODULE(self.embed_dim, self.num_heads, self.dropout,
168
+ (self.linear_Q._weight_bias()[1] is not None),
169
+ (self.bias_k is not None),
170
+ self.add_zero_attn, self.kdim, self.vdim, self.batch_first)
171
+ assert fp._qkv_same_embed_dim == self._qkv_same_embed_dim
172
+ if self.bias_k is not None:
173
+ fp.bias_k = nn.Parameter(self.bias_k.dequantize())
174
+ if self.bias_v is not None:
175
+ fp.bias_v = nn.Parameter(self.bias_v.dequantize())
176
+
177
+ # Set the linear weights
178
+ # Note: Because the linear layers are quantized, mypy does not nkow how
179
+ # to deal with them -- might need to ignore the typing checks.
180
+ # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
181
+ w, b = self.out_proj._weight_bias() # type: ignore[operator, has-type]
182
+ fp.out_proj.weight = nn.Parameter(w.dequantize())
183
+ if b is not None:
184
+ fp.out_proj.bias = nn.Parameter(b)
185
+
186
+ wQ, bQ = self.linear_Q._weight_bias() # type: ignore[operator]
187
+ wQ = wQ.dequantize()
188
+ wK, bK = self.linear_K._weight_bias() # type: ignore[operator]
189
+ wK = wK.dequantize()
190
+ wV, bV = self.linear_V._weight_bias() # type: ignore[operator]
191
+ wV = wV.dequantize()
192
+ if fp._qkv_same_embed_dim:
193
+ # Use separate params
194
+ _start = 0
195
+ _end = _start + fp.embed_dim
196
+ fp.in_proj_weight[_start:_end, :] = wQ
197
+ if fp.in_proj_bias is not None:
198
+ assert all(bQ == 0)
199
+ fp.in_proj_bias[_start:_end] = bQ
200
+
201
+ _start = _end
202
+ _end = _start + fp.embed_dim
203
+ fp.in_proj_weight[_start:_end, :] = wK
204
+ if fp.in_proj_bias is not None:
205
+ assert all(bK == 0)
206
+ fp.in_proj_bias[_start:_end] = bK
207
+
208
+ _start = _end
209
+ fp.in_proj_weight[_start:, :] = wV
210
+ if fp.in_proj_bias is not None:
211
+ assert all(bV == 0)
212
+ fp.in_proj_bias[_start:] = bV
213
+ else:
214
+ fp.q_proj_weight = nn.Parameter(wQ)
215
+ fp.k_proj_weight = nn.Parameter(wK)
216
+ fp.v_proj_weight = nn.Parameter(wV)
217
+ if fp.in_proj_bias is None:
218
+ self.linear_Q.bias = None
219
+ self.linear_K.bias = None
220
+ self.linear_V.bias = None
221
+ else:
222
+ fp.in_proj_bias[0:fp.embed_dim] = bQ
223
+ fp.in_proj_bias[fp.embed_dim:(fp.embed_dim * 2)] = bK
224
+ fp.in_proj_bias[(fp.embed_dim * 2):] = bV
225
+
226
+ return fp
227
+
228
+ @classmethod
229
+ def from_observed(cls, other):
230
+ # The whole flow is float -> observed -> quantized
231
+ # This class does float -> observed only
232
+ # See nn.quantized.MultiheadAttention
233
+ raise NotImplementedError("It looks like you are trying to prepare an "
234
+ "MHA module. Please, see "
235
+ "the examples on quantizable MHAs.")
236
+
237
+ def forward(self,
238
+ query: Tensor,
239
+ key: Tensor,
240
+ value: Tensor,
241
+ key_padding_mask: Optional[Tensor] = None,
242
+ need_weights: bool = True,
243
+ attn_mask: Optional[Tensor] = None,
244
+ average_attn_weights: bool = True,
245
+ is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
246
+ r"""
247
+ Note::
248
+ Please, refer to :func:`~torch.nn.MultiheadAttention.forward` for more
249
+ information
250
+
251
+ Args:
252
+ query, key, value: map a query and a set of key-value pairs to an output.
253
+ See "Attention Is All You Need" for more details.
254
+ key_padding_mask: if provided, specified padding elements in the key will
255
+ be ignored by the attention. When given a binary mask and a value is True,
256
+ the corresponding value on the attention layer will be ignored.
257
+ need_weights: output attn_output_weights.
258
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
259
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
260
+
261
+ Shape:
262
+ - Inputs:
263
+ - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
264
+ the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
265
+ - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
266
+ the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
267
+ - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
268
+ the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``.
269
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
270
+ If a BoolTensor is provided, the positions with the
271
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
272
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
273
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
274
+ S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
275
+ positions. If a BoolTensor is provided, positions with ``True``
276
+ is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
277
+ is provided, it will be added to the attention weight.
278
+ - is_causal: If specified, applies a causal mask as attention mask. Mutually exclusive with providing attn_mask.
279
+ Default: ``False``.
280
+ - average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
281
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
282
+ effect when ``need_weights=True.``. Default: True (i.e. average weights across heads)
283
+
284
+ - Outputs:
285
+ - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
286
+ E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``.
287
+ - attn_output_weights: If ``average_attn_weights=True``, returns attention weights averaged
288
+ across heads of shape :math:`(N, L, S)`, where N is the batch size, L is the target sequence length,
289
+ S is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
290
+ head of shape :math:`(N, num_heads, L, S)`.
291
+ """
292
+ return self._forward_impl(query, key, value, key_padding_mask,
293
+ need_weights, attn_mask, average_attn_weights,
294
+ is_causal)
295
+
296
+ def _forward_impl(self,
297
+ query: Tensor,
298
+ key: Tensor,
299
+ value: Tensor,
300
+ key_padding_mask: Optional[Tensor] = None,
301
+ need_weights: bool = True,
302
+ attn_mask: Optional[Tensor] = None,
303
+ average_attn_weights: bool = True,
304
+ is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
305
+ # This version will not deal with the static key/value pairs.
306
+ # Keeping it here for future changes.
307
+ #
308
+ # TODO: This method has some duplicate lines with the
309
+ # `torch.nn.functional.multi_head_attention`. Will need to refactor.
310
+ static_k = None
311
+ static_v = None
312
+
313
+ if attn_mask is not None and is_causal:
314
+ raise AssertionError("Only allow causal mask or attn_mask")
315
+
316
+ if is_causal:
317
+ raise AssertionError("causal mask not supported by AO MHA module")
318
+
319
+ if self.batch_first:
320
+ query, key, value = (x.transpose(0, 1) for x in (query, key, value))
321
+
322
+ tgt_len, bsz, embed_dim_to_check = query.size()
323
+ assert self.embed_dim == embed_dim_to_check
324
+ # allow MHA to have different sizes for the feature dimension
325
+ assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
326
+
327
+ head_dim = self.embed_dim // self.num_heads
328
+ assert head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
329
+ scaling = float(head_dim) ** -0.5
330
+
331
+ q = self.linear_Q(query)
332
+ k = self.linear_K(key)
333
+ v = self.linear_V(value)
334
+
335
+ q = self.q_scaling_product.mul_scalar(q, scaling)
336
+
337
+ if attn_mask is not None:
338
+ if attn_mask.dtype == torch.uint8:
339
+ warnings.warn(
340
+ "Byte tensor for `attn_mask` in `nn.MultiheadAttention` is deprecated. "
341
+ "Use bool tensor instead.",
342
+ stacklevel=3,
343
+ )
344
+ attn_mask = attn_mask.to(torch.bool)
345
+ assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
346
+ f'Only float and bool types are supported for attn_mask, not {attn_mask.dtype}'
347
+
348
+ if attn_mask.dim() == 2:
349
+ attn_mask = attn_mask.unsqueeze(0)
350
+ if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
351
+ raise RuntimeError('The size of the 2D attn_mask is not correct.')
352
+ elif attn_mask.dim() == 3:
353
+ if list(attn_mask.size()) != [bsz * self.num_heads, query.size(0), key.size(0)]:
354
+ raise RuntimeError('The size of the 3D attn_mask is not correct.')
355
+ else:
356
+ raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
357
+ # attn_mask's dim is 3 now.
358
+
359
+ # convert ByteTensor key_padding_mask to bool
360
+ if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
361
+ warnings.warn(
362
+ "Byte tensor for `key_padding_mask` in `nn.MultiheadAttention` is deprecated. "
363
+ "Use bool tensor instead.",
364
+ stacklevel=3,
365
+ )
366
+ key_padding_mask = key_padding_mask.to(torch.bool)
367
+ if self.bias_k is not None and self.bias_v is not None:
368
+ if static_k is None and static_v is None:
369
+
370
+ # Explicitly assert that bias_k and bias_v are not None
371
+ # in a way that TorchScript can understand.
372
+ bias_k = self.bias_k
373
+ assert bias_k is not None
374
+ bias_v = self.bias_v
375
+ assert bias_v is not None
376
+
377
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
378
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
379
+ if attn_mask is not None:
380
+ attn_mask = nnF.pad(attn_mask, (0, 1))
381
+ if key_padding_mask is not None:
382
+ key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
383
+ else:
384
+ assert static_k is None, "bias cannot be added to static key."
385
+ assert static_v is None, "bias cannot be added to static value."
386
+ else:
387
+ assert self.bias_k is None
388
+ assert self.bias_v is None
389
+
390
+ q = q.contiguous().view(tgt_len, bsz * self.num_heads, head_dim).transpose(0, 1)
391
+ if k is not None:
392
+ k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
393
+ if v is not None:
394
+ v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1)
395
+
396
+ if static_k is not None:
397
+ assert static_k.size(0) == bsz * self.num_heads
398
+ assert static_k.size(2) == head_dim
399
+ k = static_k
400
+
401
+ if static_v is not None:
402
+ assert static_v.size(0) == bsz * self.num_heads
403
+ assert static_v.size(2) == head_dim
404
+ v = static_v
405
+
406
+ src_len = k.size(1)
407
+
408
+ if key_padding_mask is not None:
409
+ assert key_padding_mask.size(0) == bsz
410
+ assert key_padding_mask.size(1) == src_len
411
+
412
+ if self.add_zero_attn:
413
+ src_len += 1
414
+ k_zeros = torch.zeros((k.size(0), 1) + k.size()[2:])
415
+ if k.is_quantized:
416
+ k_zeros = torch.quantize_per_tensor(k_zeros, k.q_scale(), k.q_zero_point(), k.dtype)
417
+ k = torch.cat([k, k_zeros], dim=1)
418
+ v_zeros = torch.zeros((v.size(0), 1) + k.size()[2:])
419
+ if v.is_quantized:
420
+ v_zeros = torch.quantize_per_tensor(v_zeros, v.q_scale(), v.q_zero_point(), v.dtype)
421
+ v = torch.cat([v, v_zeros], dim=1)
422
+
423
+ if attn_mask is not None:
424
+ attn_mask = nnF.pad(attn_mask, (0, 1))
425
+ if key_padding_mask is not None:
426
+ key_padding_mask = nnF.pad(key_padding_mask, (0, 1))
427
+
428
+ # Leaving the quantized zone here
429
+ q = self.dequant_q(q)
430
+ k = self.dequant_k(k)
431
+ v = self.dequant_v(v)
432
+ attn_output_weights = torch.bmm(q, k.transpose(1, 2))
433
+ assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
434
+
435
+ if attn_mask is not None:
436
+ if attn_mask.dtype == torch.bool:
437
+ attn_output_weights.masked_fill_(attn_mask, float('-inf'))
438
+ else:
439
+ attn_output_weights += attn_mask
440
+
441
+ if key_padding_mask is not None:
442
+ attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
443
+ attn_output_weights = attn_output_weights.masked_fill(
444
+ key_padding_mask.unsqueeze(1).unsqueeze(2),
445
+ float('-inf'),
446
+ )
447
+ attn_output_weights = attn_output_weights.view(bsz * self.num_heads, tgt_len, src_len)
448
+
449
+ attn_output_weights = nnF.softmax(
450
+ attn_output_weights, dim=-1)
451
+ attn_output_weights = nnF.dropout(attn_output_weights, p=self.dropout, training=self.training)
452
+
453
+ attn_output = torch.bmm(attn_output_weights, v)
454
+ assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, head_dim]
455
+ if self.batch_first:
456
+ attn_output = attn_output.view(bsz, tgt_len, self.embed_dim)
457
+ else:
458
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
459
+
460
+ # Reentering the quantized zone
461
+ attn_output = self.quant_attn_output(attn_output)
462
+ # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969
463
+ attn_output = self.out_proj(attn_output) # type: ignore[has-type]
464
+ attn_output_weights = self.quant_attn_output_weights(attn_output_weights)
465
+
466
+ if need_weights:
467
+ # average attention weights over heads
468
+ attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len)
469
+ if average_attn_weights:
470
+ attn_output_weights = attn_output_weights.mean(dim=1)
471
+ return attn_output, attn_output_weights
472
+ else:
473
+ return attn_output, None
parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import numbers
3
+ from typing import Optional, Tuple
4
+ import warnings
5
+
6
+ import torch
7
+ from torch import Tensor
8
+
9
+ """
10
+ We will recreate all the RNN modules as we require the modules to be decomposed
11
+ into its building blocks to be able to observe.
12
+ """
13
+
14
+ __all__ = [
15
+ "LSTMCell",
16
+ "LSTM"
17
+ ]
18
+
19
class LSTMCell(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM) cell.

    For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell`

    Examples::

        >>> import torch.ao.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTMCell(10, 20)
        >>> input = torch.randn(6, 10)
        >>> hx = torch.randn(3, 20)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx, cx = rnn(input[i], (hx, cx))
        ...     output.append(hx)
    """
    # Float module this quantizable cell is swapped in for during `prepare`.
    _FLOAT_MODULE = torch.nn.LSTMCell

    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_dim
        self.hidden_size = hidden_dim
        self.bias = bias

        # Fused projections: each emits all four gates at once (4 * hidden_dim).
        self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
        self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs)
        # Each arithmetic op is a distinct FloatFunctional so it can be
        # observed individually and later replaced by its quantized version.
        self.gates = torch.ao.nn.quantized.FloatFunctional()

        self.input_gate = torch.nn.Sigmoid()
        self.forget_gate = torch.nn.Sigmoid()
        self.cell_gate = torch.nn.Tanh()
        self.output_gate = torch.nn.Sigmoid()

        self.fgate_cx = torch.ao.nn.quantized.FloatFunctional()
        self.igate_cgate = torch.ao.nn.quantized.FloatFunctional()
        self.fgate_cx_igate_cgate = torch.ao.nn.quantized.FloatFunctional()

        self.ogate_cy = torch.ao.nn.quantized.FloatFunctional()

        # (scale, zero_point) used when synthesizing quantized zero states in
        # `initialize_hidden`; configurable after construction.
        self.initial_hidden_state_qparams: Tuple[float, int] = (1.0, 0)
        self.initial_cell_state_qparams: Tuple[float, int] = (1.0, 0)
        self.hidden_state_dtype: torch.dtype = torch.quint8
        self.cell_state_dtype: torch.dtype = torch.quint8

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        """Run one LSTM step; returns the new (hidden, cell) state pair."""
        # Synthesize zero states if the caller did not provide (hx, cx).
        if hidden is None or hidden[0] is None or hidden[1] is None:
            hidden = self.initialize_hidden(x.shape[0], x.is_quantized)
        hx, cx = hidden

        igates = self.igates(x)
        hgates = self.hgates(hx)
        gates = self.gates.add(igates, hgates)

        # Split the fused projection into the four per-gate pre-activations.
        input_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1)

        input_gate = self.input_gate(input_gate)
        forget_gate = self.forget_gate(forget_gate)
        cell_gate = self.cell_gate(cell_gate)
        out_gate = self.output_gate(out_gate)

        # c_t = f * c_{t-1} + i * g
        fgate_cx = self.fgate_cx.mul(forget_gate, cx)
        igate_cgate = self.igate_cgate.mul(input_gate, cell_gate)
        fgate_cx_igate_cgate = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate)
        cy = fgate_cx_igate_cgate

        # h_t = o * tanh(c_t)
        # TODO: make this tanh a member of the module so its qparams can be configured
        tanh_cy = torch.tanh(cy)
        hy = self.ogate_cy.mul(out_gate, tanh_cy)
        return hy, cy

    def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]:
        """Return zero-initialized (hidden, cell) states for `batch_size` rows.

        If `is_quantized`, the states are quantized with the configured
        qparams/dtypes so they compose with quantized inputs.
        """
        h, c = torch.zeros((batch_size, self.hidden_size)), torch.zeros((batch_size, self.hidden_size))
        if is_quantized:
            (h_scale, h_zp) = self.initial_hidden_state_qparams
            (c_scale, c_zp) = self.initial_cell_state_qparams
            h = torch.quantize_per_tensor(h, scale=h_scale, zero_point=h_zp, dtype=self.hidden_state_dtype)
            c = torch.quantize_per_tensor(c, scale=c_scale, zero_point=c_zp, dtype=self.cell_state_dtype)
        return h, c

    def _get_name(self):
        return 'QuantizableLSTMCell'

    @classmethod
    def from_params(cls, wi, wh, bi=None, bh=None):
        """Uses the weights and biases to create a new LSTM cell.

        Args:
            wi, wh: Weights for the input and hidden layers
            bi, bh: Biases for the input and hidden layers
        """
        assert (bi is None) == (bh is None)  # Either both None or both have values
        # wi is (4*hidden, input); wh is (4*hidden, hidden).
        input_size = wi.shape[1]
        hidden_size = wh.shape[1]
        cell = cls(input_dim=input_size, hidden_dim=hidden_size,
                   bias=(bi is not None))
        cell.igates.weight = torch.nn.Parameter(wi)
        if bi is not None:
            cell.igates.bias = torch.nn.Parameter(bi)
        cell.hgates.weight = torch.nn.Parameter(wh)
        if bh is not None:
            cell.hgates.bias = torch.nn.Parameter(bh)
        return cell

    @classmethod
    def from_float(cls, other, use_precomputed_fake_quant=False):
        """Create an observable cell from a float `nn.LSTMCell`.

        Propagates `other.qconfig` down to the fused gate projections so they
        get observed during the prepare step.
        """
        assert type(other) == cls._FLOAT_MODULE
        assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'"
        observed = cls.from_params(other.weight_ih, other.weight_hh,
                                   other.bias_ih, other.bias_hh)
        observed.qconfig = other.qconfig
        observed.igates.qconfig = other.qconfig
        observed.hgates.qconfig = other.qconfig
        return observed
135
+
136
+
137
class _LSTMSingleLayer(torch.nn.Module):
    r"""A single one-directional LSTM layer.

    The difference between a layer and a cell is that the layer can process a
    sequence, while the cell only expects an instantaneous value.
    """
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        # One observable cell, applied step by step over the sequence.
        self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        # Unroll the cell along the time dimension (dim 0), collecting the
        # hidden state emitted at each step.
        outputs = []
        for step in range(x.shape[0]):
            hidden = self.cell(x[step], hidden)
            outputs.append(hidden[0])  # type: ignore[index]
        stacked = torch.stack(outputs, 0)
        return stacked, hidden

    @classmethod
    def from_params(cls, *args, **kwargs):
        # Build the underlying cell first, then wrap it in a layer with the
        # matching dimensions.
        inner_cell = LSTMCell.from_params(*args, **kwargs)
        layer = cls(inner_cell.input_size, inner_cell.hidden_size, inner_cell.bias)
        layer.cell = inner_cell
        return layer
164
+
165
+
166
class _LSTMLayer(torch.nn.Module):
    r"""A single bi-directional LSTM layer.

    Wraps a forward `_LSTMSingleLayer` and, when `bidirectional`, a backward
    one running over the time-reversed input.
    """
    def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True,
                 batch_first: bool = False, bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)
        if self.bidirectional:
            self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        # Internally everything is time-major; convert on entry/exit.
        if self.batch_first:
            x = x.transpose(0, 1)
        if hidden is None:
            hx_fw, cx_fw = (None, None)
        else:
            hx_fw, cx_fw = hidden
        hidden_bw: Optional[Tuple[Tensor, Tensor]] = None
        # For the bidirectional case the incoming states are stacked along
        # dim 0 as [forward, backward]; split them apart here. The explicit
        # None checks keep this TorchScript-compatible.
        if self.bidirectional:
            if hx_fw is None:
                hx_bw = None
            else:
                hx_bw = hx_fw[1]
                hx_fw = hx_fw[0]
            if cx_fw is None:
                cx_bw = None
            else:
                cx_bw = cx_fw[1]
                cx_fw = cx_fw[0]
            if hx_bw is not None and cx_bw is not None:
                hidden_bw = hx_bw, cx_bw
        if hx_fw is None and cx_fw is None:
            hidden_fw = None
        else:
            hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw)
        result_fw, hidden_fw = self.layer_fw(x, hidden_fw)

        if hasattr(self, 'layer_bw') and self.bidirectional:
            # Backward direction: run on the time-reversed sequence, then
            # reverse the outputs so both directions align per time step.
            x_reversed = x.flip(0)
            result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
            result_bw = result_bw.flip(0)

            # Concatenate the two directions along the feature dimension.
            result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
            if hidden_fw is None and hidden_bw is None:
                h = None
                c = None
            elif hidden_fw is None:
                (h, c) = torch.jit._unwrap_optional(hidden_bw)
            elif hidden_bw is None:
                (h, c) = torch.jit._unwrap_optional(hidden_fw)
            else:
                # Stack per-direction states back along a new leading dim.
                h = torch.stack([hidden_fw[0], hidden_bw[0]], 0)  # type: ignore[list-item]
                c = torch.stack([hidden_fw[1], hidden_bw[1]], 0)  # type: ignore[list-item]
        else:
            result = result_fw
            h, c = torch.jit._unwrap_optional(hidden_fw)  # type: ignore[assignment]

        if self.batch_first:
            result.transpose_(0, 1)

        return result, (h, c)

    @classmethod
    def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
        r"""
        There is no FP equivalent of this class. This function is here just to
        mimic the behavior of the `prepare` within the `torch.ao.quantization`
        flow.
        """
        assert hasattr(other, 'qconfig') or (qconfig is not None)

        # kwargs may override the hyper-parameters read from the float module.
        input_size = kwargs.get('input_size', other.input_size)
        hidden_size = kwargs.get('hidden_size', other.hidden_size)
        bias = kwargs.get('bias', other.bias)
        batch_first = kwargs.get('batch_first', other.batch_first)
        bidirectional = kwargs.get('bidirectional', other.bidirectional)

        layer = cls(input_size, hidden_size, bias, batch_first, bidirectional)
        layer.qconfig = getattr(other, 'qconfig', qconfig)
        # Per-layer flat weights of nn.LSTM follow this naming scheme.
        wi = getattr(other, f'weight_ih_l{layer_idx}')
        wh = getattr(other, f'weight_hh_l{layer_idx}')
        bi = getattr(other, f'bias_ih_l{layer_idx}', None)
        bh = getattr(other, f'bias_hh_l{layer_idx}', None)

        layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)

        if other.bidirectional:
            wi = getattr(other, f'weight_ih_l{layer_idx}_reverse')
            wh = getattr(other, f'weight_hh_l{layer_idx}_reverse')
            bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None)
            bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None)
            layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh)
        return layer
262
+
263
+
264
class LSTM(torch.nn.Module):
    r"""A quantizable long short-term memory (LSTM).

    For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`

    Attributes:
        layers : instances of the `_LSTMLayer`

    .. note::
        To access the weights and biases, you need to access them per layer.
        See examples below.

    Examples::

        >>> import torch.ao.nn.quantizable as nnqa
        >>> rnn = nnqa.LSTM(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> c0 = torch.randn(2, 3, 20)
        >>> output, (hn, cn) = rnn(input, (h0, c0))
        >>> # To get the weights:
        >>> # xdoctest: +SKIP
        >>> print(rnn.layers[0].weight_ih)
        tensor([[...]])
        >>> print(rnn.layers[0].weight_hh)
        AssertionError: There is no reverse path in the non-bidirectional layer
    """
    # Float module this quantizable LSTM is swapped in for.
    _FLOAT_MODULE = torch.nn.LSTM

    def __init__(self, input_size: int, hidden_size: int,
                 num_layers: int = 1, bias: bool = True,
                 batch_first: bool = False, dropout: float = 0.,
                 bidirectional: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        self.training = False  # Default to eval mode. If we want to train, we will explicitly set to training.
        num_directions = 2 if bidirectional else 1

        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
                isinstance(dropout, bool):
            raise ValueError("dropout should be a number in range [0, 1] "
                             "representing the probability of an element being "
                             "zeroed")
        if dropout > 0:
            # Dropout is accepted for interface parity with nn.LSTM but is
            # not applied by this quantizable implementation.
            warnings.warn("dropout option for quantizable LSTM is ignored. "
                          "If you are training, please, use nn.LSTM version "
                          "followed by `prepare` step.")
            if num_layers == 1:
                warnings.warn("dropout option adds dropout after all but last "
                              "recurrent layer, so non-zero dropout expects "
                              f"num_layers greater than 1, but got dropout={dropout} "
                              f"and num_layers={num_layers}")

        # Layers are always built time-major (batch_first=False); the
        # transposes in `forward` handle the batch-first interface.
        layers = [_LSTMLayer(self.input_size, self.hidden_size,
                             self.bias, batch_first=False,
                             bidirectional=self.bidirectional, **factory_kwargs)]
        for layer in range(1, num_layers):
            layers.append(_LSTMLayer(self.hidden_size, self.hidden_size,
                                     self.bias, batch_first=False,
                                     bidirectional=self.bidirectional,
                                     **factory_kwargs))
        self.layers = torch.nn.ModuleList(layers)

    def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None):
        if self.batch_first:
            x = x.transpose(0, 1)

        max_batch_size = x.size(1)
        num_directions = 2 if self.bidirectional else 1
        if hidden is None:
            # Synthesize zero states, quantized to match a quantized input.
            zeros = torch.zeros(num_directions, max_batch_size,
                                self.hidden_size, dtype=torch.float,
                                device=x.device)
            zeros.squeeze_(0)
            if x.is_quantized:
                zeros = torch.quantize_per_tensor(zeros, scale=1.0,
                                                  zero_point=0, dtype=x.dtype)
            hxcx = [(zeros, zeros) for _ in range(self.num_layers)]
        else:
            hidden_non_opt = torch.jit._unwrap_optional(hidden)
            if isinstance(hidden_non_opt[0], Tensor):
                # Split the stacked (num_layers * num_directions, ...) states
                # into one (h, c) pair per layer.
                hx = hidden_non_opt[0].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size)
                cx = hidden_non_opt[1].reshape(self.num_layers, num_directions,
                                               max_batch_size,
                                               self.hidden_size)
                hxcx = [(hx[idx].squeeze(0), cx[idx].squeeze(0)) for idx in range(self.num_layers)]
            else:
                hxcx = hidden_non_opt

        hx_list = []
        cx_list = []
        for idx, layer in enumerate(self.layers):
            x, (h, c) = layer(x, hxcx[idx])
            hx_list.append(torch.jit._unwrap_optional(h))
            cx_list.append(torch.jit._unwrap_optional(c))
        hx_tensor = torch.stack(hx_list)
        cx_tensor = torch.stack(cx_list)

        # We are creating another dimension for bidirectional case
        # need to collapse it
        hx_tensor = hx_tensor.reshape(-1, hx_tensor.shape[-2], hx_tensor.shape[-1])
        cx_tensor = cx_tensor.reshape(-1, cx_tensor.shape[-2], cx_tensor.shape[-1])

        if self.batch_first:
            x = x.transpose(0, 1)

        return x, (hx_tensor, cx_tensor)

    def _get_name(self):
        return 'QuantizableLSTM'

    @classmethod
    def from_float(cls, other, qconfig=None):
        """Create and prepare an observable LSTM from a float `nn.LSTM`."""
        assert isinstance(other, cls._FLOAT_MODULE)
        assert (hasattr(other, 'qconfig') or qconfig)
        observed = cls(other.input_size, other.hidden_size, other.num_layers,
                       other.bias, other.batch_first, other.dropout,
                       other.bidirectional)
        observed.qconfig = getattr(other, 'qconfig', qconfig)
        for idx in range(other.num_layers):
            observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig,
                                                         batch_first=False)

        # Prepare the model
        if other.training:
            observed.train()
            observed = torch.ao.quantization.prepare_qat(observed, inplace=True)
        else:
            observed.eval()
            observed = torch.ao.quantization.prepare(observed, inplace=True)
        return observed

    @classmethod
    def from_observed(cls, other):
        # The whole flow is float -> observed -> quantized
        # This class does float -> observed only
        raise NotImplementedError("It looks like you are trying to convert a "
                                  "non-quantizable LSTM module. Please, see "
                                  "the examples on quantizable LSTMs.")
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (654 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from .linear import Linear
3
+ from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
4
+ from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
5
+
6
+ __all__ = [
7
+ 'Linear',
8
+ 'LSTM',
9
+ 'GRU',
10
+ 'LSTMCell',
11
+ 'RNNCell',
12
+ 'GRUCell',
13
+ 'Conv1d',
14
+ 'Conv2d',
15
+ 'Conv3d',
16
+ 'ConvTranspose1d',
17
+ 'ConvTranspose2d',
18
+ 'ConvTranspose3d',
19
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (555 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (34.3 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py ADDED
@@ -0,0 +1,1101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import numbers
3
+ import warnings
4
+ from typing_extensions import deprecated
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from torch import Tensor # noqa: F401
9
+ from torch._jit_internal import Tuple, Optional, List, Union, Dict # noqa: F401
10
+ from torch.nn.utils.rnn import PackedSequence
11
+ from torch.ao.nn.quantized.modules.utils import _quantize_weight
12
+
13
+ __all__ = ['pack_weight_bias', 'PackedParameter', 'RNNBase', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell',
14
+ 'GRUCell', "apply_permutation"]
15
+
16
+
17
+ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
18
+ return tensor.index_select(dim, permutation)
19
+
20
+
21
+ @deprecated(
22
+ "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
23
+ category=FutureWarning,
24
+ )
25
+ def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
26
+ return _apply_permutation(tensor, permutation, dim)
27
+
28
+
29
+ def pack_weight_bias(qweight, bias, dtype):
30
+
31
+ if dtype == torch.qint8:
32
+ # for each layer, for each direction we need to quantize and pack
33
+ # weights and pack parameters in this order:
34
+ #
35
+ # w_ih, w_hh
36
+ packed_weight = \
37
+ torch.ops.quantized.linear_prepack(qweight, bias)
38
+
39
+ return packed_weight
40
+ else:
41
+ # for each layer, for each direction we need to quantize and pack
42
+ # weights and pack parameters in this order:
43
+ #
44
+ # packed_ih, packed_hh, b_ih, b_hh
45
+ packed_weight = torch.ops.quantized.linear_prepack_fp16(
46
+ qweight, bias)
47
+
48
+ return packed_weight
49
+
50
+
51
+ class PackedParameter(torch.nn.Module):
52
+ def __init__(self, param):
53
+ super().__init__()
54
+ self.param = param
55
+
56
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
57
+ super()._save_to_state_dict(destination, prefix, keep_vars)
58
+ destination[prefix + 'param'] = self.param
59
+
60
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
61
+ missing_keys, unexpected_keys, error_msgs):
62
+ self.param = state_dict[prefix + 'param']
63
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
64
+ missing_keys, unexpected_keys, error_msgs)
65
+
66
+
67
+ class RNNBase(torch.nn.Module):
68
+
69
+ _FLOAT_MODULE = nn.RNNBase
70
+
71
+ _version = 2
72
+
73
+ def __init__(self, mode, input_size, hidden_size,
74
+ num_layers=1, bias=True, batch_first=False,
75
+ dropout=0., bidirectional=False, dtype=torch.qint8):
76
+ super().__init__()
77
+
78
+ self.mode = mode
79
+ self.input_size = input_size
80
+ self.hidden_size = hidden_size
81
+ self.num_layers = num_layers
82
+ self.bias = bias
83
+ self.batch_first = batch_first
84
+ self.dropout = float(dropout)
85
+ self.bidirectional = bidirectional
86
+ self.dtype = dtype
87
+ self.version = 2
88
+ self.training = False
89
+ num_directions = 2 if bidirectional else 1
90
+
91
+ # "type: ignore" is required since ints and Numbers are not fully comparable
92
+ # https://github.com/python/mypy/issues/8566
93
+ if not isinstance(dropout, numbers.Number) \
94
+ or not 0 <= dropout <= 1 or isinstance(dropout, bool): # type: ignore[operator]
95
+ raise ValueError("dropout should be a number in range [0, 1] "
96
+ "representing the probability of an element being "
97
+ "zeroed")
98
+ if dropout > 0 and num_layers == 1: # type: ignore[operator]
99
+ warnings.warn("dropout option adds dropout after all but last "
100
+ "recurrent layer, so non-zero dropout expects "
101
+ f"num_layers greater than 1, but got dropout={dropout} and "
102
+ f"num_layers={num_layers}")
103
+
104
+ if mode == 'LSTM':
105
+ gate_size = 4 * hidden_size
106
+ elif mode == 'GRU':
107
+ gate_size = 3 * hidden_size
108
+ else:
109
+ raise ValueError("Unrecognized RNN mode: " + mode)
110
+
111
+ _all_weight_values = []
112
+ for layer in range(num_layers):
113
+ for direction in range(num_directions):
114
+ layer_input_size = input_size if layer == 0 else hidden_size * num_directions
115
+
116
+ w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
117
+ w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
118
+ b_ih = torch.randn(gate_size).to(torch.float)
119
+ b_hh = torch.randn(gate_size).to(torch.float)
120
+ if dtype == torch.qint8:
121
+ w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
122
+ w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
123
+ packed_ih = \
124
+ torch.ops.quantized.linear_prepack(w_ih, b_ih)
125
+ packed_hh = \
126
+ torch.ops.quantized.linear_prepack(w_hh, b_hh)
127
+ if self.version is None or self.version < 2:
128
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
129
+ packed_ih, packed_hh, b_ih, b_hh)
130
+ else:
131
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
132
+ packed_ih, packed_hh, b_ih, b_hh, True)
133
+ else:
134
+ packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
135
+ packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
136
+ cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
137
+ packed_ih, packed_hh)
138
+
139
+ _all_weight_values.append(PackedParameter(cell_params))
140
+ self._all_weight_values = torch.nn.ModuleList(_all_weight_values)
141
+
142
+ def _get_name(self):
143
+ return 'DynamicQuantizedRNN'
144
+
145
+ def extra_repr(self):
146
+ s = '{input_size}, {hidden_size}'
147
+ if self.num_layers != 1:
148
+ s += ', num_layers={num_layers}'
149
+ if self.bias is not True:
150
+ s += ', bias={bias}'
151
+ if self.batch_first is not False:
152
+ s += ', batch_first={batch_first}'
153
+ if self.dropout != 0:
154
+ s += ', dropout={dropout}'
155
+ if self.bidirectional is not False:
156
+ s += ', bidirectional={bidirectional}'
157
+ return s.format(**self.__dict__)
158
+
159
+ def __repr__(self):
160
+ # We don't want to show `ModuleList` children, hence custom
161
+ # `__repr__`. This is the same as nn.Module.__repr__, except the check
162
+ # for the `PackedParameter` and `nn.ModuleList`.
163
+ # You should still override `extra_repr` to add more info.
164
+ extra_lines = []
165
+ extra_repr = self.extra_repr()
166
+ # empty string will be split into list ['']
167
+ if extra_repr:
168
+ extra_lines = extra_repr.split('\n')
169
+ child_lines = []
170
+ for key, module in self._modules.items():
171
+ if isinstance(module, (PackedParameter, nn.ModuleList)):
172
+ continue
173
+ mod_str = repr(module)
174
+ mod_str = nn.modules.module._addindent(mod_str, 2)
175
+ child_lines.append('(' + key + '): ' + mod_str)
176
+ lines = extra_lines + child_lines
177
+
178
+ main_str = self._get_name() + '('
179
+ if lines:
180
+ # simple one-liner info, which most builtin Modules will use
181
+ if len(extra_lines) == 1 and not child_lines:
182
+ main_str += extra_lines[0]
183
+ else:
184
+ main_str += '\n ' + '\n '.join(lines) + '\n'
185
+
186
+ main_str += ')'
187
+ return main_str
188
+
189
+ def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
190
+ expected_input_dim = 2 if batch_sizes is not None else 3
191
+ if input.dim() != expected_input_dim:
192
+ raise RuntimeError(
193
+ f'input must have {expected_input_dim} dimensions, got {input.dim()}')
194
+ if self.input_size != input.size(-1):
195
+ raise RuntimeError(
196
+ f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}')
197
+
198
+ def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
199
+ if batch_sizes is not None:
200
+ mini_batch = int(batch_sizes[0])
201
+ else:
202
+ mini_batch = input.size(0) if self.batch_first else input.size(1)
203
+ num_directions = 2 if self.bidirectional else 1
204
+ expected_hidden_size = (self.num_layers * num_directions,
205
+ mini_batch, self.hidden_size)
206
+ return expected_hidden_size
207
+
208
+ def check_hidden_size(
209
+ self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
210
+ msg: str = 'Expected hidden size {}, got {}'
211
+ ) -> None:
212
+ if hx.size() != expected_hidden_size:
213
+ raise RuntimeError(msg.format(
214
+ expected_hidden_size, list(hx.size())))
215
+
216
+ def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
217
+ self.check_input(input, batch_sizes)
218
+ expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
219
+ self.check_hidden_size(hidden, expected_hidden_size,
220
+ msg='Expected hidden size {}, got {}')
221
+
222
+ def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
223
+ if permutation is None:
224
+ return hx
225
+ return _apply_permutation(hx, permutation)
226
+
227
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
228
+ missing_keys, unexpected_keys, error_msgs):
229
+ version = local_metadata.get('version', None)
230
+ self.version = version
231
+ super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
232
+ missing_keys, unexpected_keys, error_msgs)
233
+
234
+ def set_weight_bias(self, weight_bias_dict):
235
+
236
+ def weight_bias_name(ihhh, layer, suffix):
237
+ weight_name = f"weight_{ihhh}_l{layer}{suffix}"
238
+ bias_name = f"bias_{ihhh}_l{layer}{suffix}"
239
+ return weight_name, bias_name
240
+
241
+ num_directions = 2 if self.bidirectional else 1
242
+ # TODO: dedup with __init__ of RNNBase
243
+ _all_weight_values = []
244
+ for layer in range(self.num_layers):
245
+ for direction in range(num_directions):
246
+ suffix = "_reverse" if direction == 1 else ""
247
+ w_ih_name, b_ih_name = weight_bias_name("ih", layer, suffix)
248
+ w_hh_name, b_hh_name = weight_bias_name("hh", layer, suffix)
249
+ w_ih = weight_bias_dict[w_ih_name]
250
+ b_ih = weight_bias_dict[b_ih_name]
251
+ w_hh = weight_bias_dict[w_hh_name]
252
+ b_hh = weight_bias_dict[b_hh_name]
253
+ if w_ih.dtype == torch.qint8:
254
+ packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
255
+ packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
256
+ if self.version is None or self.version < 2:
257
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
258
+ packed_ih, packed_hh, b_ih, b_hh)
259
+ else:
260
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
261
+ packed_ih, packed_hh, b_ih, b_hh, True)
262
+ else:
263
+ packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
264
+ packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
265
+ cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
266
+ packed_ih, packed_hh)
267
+
268
+ _all_weight_values.append(PackedParameter(cell_params))
269
+ self._all_weight_values = torch.nn.ModuleList(_all_weight_values)
270
+
271
+ @classmethod
272
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
273
+ assert type(mod) in {torch.nn.LSTM,
274
+ torch.nn.GRU}, 'nn.quantized.dynamic.RNNBase.from_float only works for nn.LSTM and nn.GRU'
275
+ assert hasattr(
276
+ mod,
277
+ 'qconfig'
278
+ ), 'Input float module must have qconfig defined'
279
+
280
+ if mod.qconfig is not None and mod.qconfig.weight is not None:
281
+ weight_observer_method = mod.qconfig.weight
282
+ else:
283
+ # We have the circular import issues if we import the qconfig in the beginning of this file:
284
+ # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
285
+ # import until we need it.
286
+ from torch.ao.quantization.qconfig import default_dynamic_qconfig
287
+ weight_observer_method = default_dynamic_qconfig.weight
288
+
289
+ dtype = weight_observer_method().dtype
290
+ supported_scalar_types = [torch.qint8, torch.float16]
291
+ if dtype not in supported_scalar_types:
292
+ raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}')
293
+ # RNNBase can be either LSTM or GRU
294
+ qRNNBase: Union[LSTM, GRU]
295
+ if mod.mode == 'LSTM':
296
+ qRNNBase = LSTM(mod.input_size, mod.hidden_size, mod.num_layers,
297
+ mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
298
+ elif mod.mode == 'GRU':
299
+ qRNNBase = GRU(mod.input_size, mod.hidden_size, mod.num_layers,
300
+ mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
301
+ else:
302
+ raise NotImplementedError('Only LSTM/GRU is supported for QuantizedRNN for now')
303
+
304
+ num_directions = 2 if mod.bidirectional else 1
305
+
306
+ assert mod.bias
307
+
308
+ _all_weight_values = []
309
+ for layer in range(qRNNBase.num_layers):
310
+ for direction in range(num_directions):
311
+ suffix = '_reverse' if direction == 1 else ''
312
+
313
+ def retrieve_weight_bias(ihhh):
314
+ weight_name = f'weight_{ihhh}_l{layer}{suffix}'
315
+ bias_name = f'bias_{ihhh}_l{layer}{suffix}'
316
+ weight = getattr(mod, weight_name)
317
+ bias = getattr(mod, bias_name)
318
+ return weight, bias
319
+
320
+ weight_ih, bias_ih = retrieve_weight_bias('ih')
321
+ weight_hh, bias_hh = retrieve_weight_bias('hh')
322
+
323
+ if dtype == torch.qint8:
324
+ def quantize_and_pack(w, b):
325
+ weight_observer = weight_observer_method()
326
+ weight_observer(w)
327
+ qweight = _quantize_weight(w.float(), weight_observer)
328
+ packed_weight = \
329
+ torch.ops.quantized.linear_prepack(qweight, b)
330
+ return packed_weight
331
+ packed_ih = quantize_and_pack(weight_ih, bias_ih)
332
+ packed_hh = quantize_and_pack(weight_hh, bias_hh)
333
+ if qRNNBase.version is None or qRNNBase.version < 2:
334
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
335
+ packed_ih, packed_hh, bias_ih, bias_hh)
336
+ else:
337
+ cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
338
+ packed_ih, packed_hh, bias_ih, bias_hh, True)
339
+
340
+ elif dtype == torch.float16:
341
+ packed_ih = torch.ops.quantized.linear_prepack_fp16(
342
+ weight_ih.float(), bias_ih)
343
+ packed_hh = torch.ops.quantized.linear_prepack_fp16(
344
+ weight_hh.float(), bias_hh)
345
+
346
+ cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
347
+ packed_ih, packed_hh)
348
+ else:
349
+ raise RuntimeError('Unsupported dtype specified for dynamic quantized LSTM!')
350
+
351
+ _all_weight_values.append(PackedParameter(cell_params))
352
+ qRNNBase._all_weight_values = torch.nn.ModuleList(_all_weight_values)
353
+
354
+ return qRNNBase
355
+
356
+ def _weight_bias(self):
357
+ # Returns a dict of weights and biases
358
+ weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}}
359
+ count = 0
360
+ num_directions = 2 if self.bidirectional else 1
361
+ for layer in range(self.num_layers):
362
+ for direction in range(num_directions):
363
+ suffix = '_reverse' if direction == 1 else ''
364
+ key_name1 = f'weight_ih_l{layer}{suffix}'
365
+ key_name2 = f'weight_hh_l{layer}{suffix}'
366
+ # packed weights are part of torchbind class, CellParamsSerializationType
367
+ # Within the packed weight class, the weight and bias are accessible as Tensors
368
+ packed_weight_bias = self._all_weight_values[count].param.__getstate__()[0][4]
369
+ weight_bias_dict['weight'][key_name1] = packed_weight_bias[0].__getstate__()[0][0]
370
+ weight_bias_dict['weight'][key_name2] = packed_weight_bias[1].__getstate__()[0][0]
371
+ key_name1 = f'bias_ih_l{layer}{suffix}'
372
+ key_name2 = f'bias_hh_l{layer}{suffix}'
373
+ weight_bias_dict['bias'][key_name1] = packed_weight_bias[0].__getstate__()[0][1]
374
+ weight_bias_dict['bias'][key_name2] = packed_weight_bias[1].__getstate__()[0][1]
375
+ count = count + 1
376
+ return weight_bias_dict
377
+
378
+ def get_weight(self):
379
+ return self._weight_bias()['weight']
380
+
381
+ def get_bias(self):
382
+ return self._weight_bias()['bias']
383
+
384
+
385
+ class LSTM(RNNBase):
386
+ r"""
387
+ A dynamic quantized LSTM module with floating point tensor as inputs and outputs.
388
+ We adopt the same interface as `torch.nn.LSTM`, please see
389
+ https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM for documentation.
390
+
391
+ Examples::
392
+
393
+ >>> # xdoctest: +SKIP
394
+ >>> rnn = nn.LSTM(10, 20, 2)
395
+ >>> input = torch.randn(5, 3, 10)
396
+ >>> h0 = torch.randn(2, 3, 20)
397
+ >>> c0 = torch.randn(2, 3, 20)
398
+ >>> output, (hn, cn) = rnn(input, (h0, c0))
399
+ """
400
+ _FLOAT_MODULE = nn.LSTM
401
+
402
+ __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
403
+
404
+ def __init__(self, *args, **kwargs):
405
+ super().__init__('LSTM', *args, **kwargs)
406
+
407
+ def _get_name(self):
408
+ return 'DynamicQuantizedLSTM'
409
+
410
+ def forward_impl(
411
+ self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]],
412
+ batch_sizes: Optional[Tensor], max_batch_size: int,
413
+ sorted_indices: Optional[Tensor]
414
+ ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
415
+ if hx is None:
416
+ num_directions = 2 if self.bidirectional else 1
417
+ zeros = torch.zeros(self.num_layers * num_directions,
418
+ max_batch_size, self.hidden_size,
419
+ dtype=input.dtype, device=input.device)
420
+ hx = (zeros, zeros)
421
+ else:
422
+ # Each batch of the hidden state should match the input sequence that
423
+ # the user believes he/she is passing in.
424
+ hx = self.permute_hidden(hx, sorted_indices)
425
+
426
+ self.check_forward_args(input, hx, batch_sizes)
427
+
428
+ _all_params = ([m.param for m in self._all_weight_values])
429
+ if batch_sizes is None:
430
+ result = torch.quantized_lstm(input, hx, _all_params, self.bias, self.num_layers,
431
+ float(self.dropout), self.training, self.bidirectional,
432
+ self.batch_first, dtype=self.dtype, use_dynamic=True)
433
+ else:
434
+ result = torch.quantized_lstm(input, batch_sizes, hx, _all_params, self.bias,
435
+ self.num_layers, float(self.dropout), self.training,
436
+ self.bidirectional, dtype=self.dtype, use_dynamic=True)
437
+ output = result[0]
438
+ hidden = result[1:]
439
+
440
+ return output, hidden
441
+
442
+ @torch.jit.export
443
+ def forward_tensor(
444
+ self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
445
+ ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
446
+ batch_sizes = None
447
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
448
+ sorted_indices = None
449
+ unsorted_indices = None
450
+
451
+ output, hidden = self.forward_impl(
452
+ input, hx, batch_sizes, max_batch_size, sorted_indices)
453
+
454
+ return output, self.permute_hidden(hidden, unsorted_indices)
455
+
456
+ @torch.jit.export
457
+ def forward_packed(
458
+ self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
459
+ ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
460
+ input_, batch_sizes, sorted_indices, unsorted_indices = input
461
+ max_batch_size = int(batch_sizes[0])
462
+
463
+ output_, hidden = self.forward_impl(
464
+ input_, hx, batch_sizes, max_batch_size, sorted_indices
465
+ )
466
+
467
+ output = PackedSequence(output_, batch_sizes,
468
+ sorted_indices, unsorted_indices)
469
+ return output, self.permute_hidden(hidden, unsorted_indices)
470
+
471
+ # "type: ignore" is required due to issue #43072
472
+ def permute_hidden( # type: ignore[override]
473
+ self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]
474
+ ) -> Tuple[Tensor, Tensor]:
475
+ if permutation is None:
476
+ return hx
477
+ return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation)
478
+
479
+ # "type: ignore" is required due to issue #43072
480
+ def check_forward_args( # type: ignore[override]
481
+ self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]
482
+ ) -> None:
483
+ self.check_input(input, batch_sizes)
484
+ expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
485
+
486
+ self.check_hidden_size(hidden[0], expected_hidden_size,
487
+ 'Expected hidden[0] size {}, got {}')
488
+ self.check_hidden_size(hidden[1], expected_hidden_size,
489
+ 'Expected hidden[1] size {}, got {}')
490
+
491
+ @torch.jit.ignore
492
+ def forward(self, input, hx=None):
493
+ if isinstance(input, PackedSequence):
494
+ return self.forward_packed(input, hx)
495
+ else:
496
+ return self.forward_tensor(input, hx)
497
+
498
+ @classmethod
499
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
500
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
501
+
502
+ @classmethod
503
+ def from_reference(cls, ref_mod):
504
+ assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 "
505
+ "exists in LSTM, may need to relax the assumption to support the use case"
506
+ qmod = cls(
507
+ ref_mod.input_size,
508
+ ref_mod.hidden_size,
509
+ ref_mod.num_layers,
510
+ ref_mod.bias,
511
+ ref_mod.batch_first,
512
+ ref_mod.dropout,
513
+ ref_mod.bidirectional,
514
+ # assuming there is layer 0, which should be OK
515
+ ref_mod.weight_ih_l0_dtype,
516
+ )
517
+ qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict())
518
+ return qmod
519
+
520
+
521
+ class GRU(RNNBase):
522
+ r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
523
+
524
+
525
+ For each element in the input sequence, each layer computes the following
526
+ function:
527
+
528
+ .. math::
529
+ \begin{array}{ll}
530
+ r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
531
+ z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
532
+ n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\
533
+ h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)}
534
+ \end{array}
535
+
536
+ where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
537
+ at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
538
+ at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
539
+ :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
540
+ :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
541
+
542
+ In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
543
+ (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
544
+ dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
545
+ variable which is :math:`0` with probability :attr:`dropout`.
546
+
547
+ Args:
548
+ input_size: The number of expected features in the input `x`
549
+ hidden_size: The number of features in the hidden state `h`
550
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
551
+ would mean stacking two GRUs together to form a `stacked GRU`,
552
+ with the second GRU taking in outputs of the first GRU and
553
+ computing the final results. Default: 1
554
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
555
+ Default: ``True``
556
+ batch_first: If ``True``, then the input and output tensors are provided
557
+ as (batch, seq, feature). Default: ``False``
558
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
559
+ GRU layer except the last layer, with dropout probability equal to
560
+ :attr:`dropout`. Default: 0
561
+ bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
562
+
563
+ Inputs: input, h_0
564
+ - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
565
+ of the input sequence. The input can also be a packed variable length
566
+ sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
567
+ for details.
568
+ - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
569
+ containing the initial hidden state for each element in the batch.
570
+ Defaults to zero if not provided. If the RNN is bidirectional,
571
+ num_directions should be 2, else it should be 1.
572
+
573
+ Outputs: output, h_n
574
+ - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
575
+ containing the output features h_t from the last layer of the GRU,
576
+ for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
577
+ given as the input, the output will also be a packed sequence.
578
+ For the unpacked case, the directions can be separated
579
+ using ``output.view(seq_len, batch, num_directions, hidden_size)``,
580
+ with forward and backward being direction `0` and `1` respectively.
581
+
582
+ Similarly, the directions can be separated in the packed case.
583
+ - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
584
+ containing the hidden state for `t = seq_len`
585
+
586
+ Like *output*, the layers can be separated using
587
+ ``h_n.view(num_layers, num_directions, batch, hidden_size)``.
588
+
589
+ Shape:
590
+ - Input1: :math:`(L, N, H_{in})` tensor containing input features where
591
+ :math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
592
+ - Input2: :math:`(S, N, H_{out})` tensor
593
+ containing the initial hidden state for each element in the batch.
594
+ :math:`H_{out}=\text{hidden\_size}`
595
+ Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}`
596
+ If the RNN is bidirectional, num_directions should be 2, else it should be 1.
597
+ - Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
598
+ - Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
599
+ for each element in the batch
600
+
601
+ Attributes:
602
+ weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
603
+ (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
604
+ Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
605
+ weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
606
+ (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
607
+ bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
608
+ (b_ir|b_iz|b_in), of shape `(3*hidden_size)`
609
+ bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
610
+ (b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
611
+
612
+ .. note::
613
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
614
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
615
+
616
+ .. note::
617
+ The calculation of new gate :math:`n_t` subtly differs from the original paper and other frameworks.
618
+ In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the
619
+ previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix
620
+ `W` and addition of bias:
621
+
622
+ .. math::
623
+ \begin{aligned}
624
+ n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn})
625
+ \end{aligned}
626
+
627
+ This is in contrast to PyTorch implementation, which is done after :math:`W_{hn} h_{(t-1)}`
628
+
629
+ .. math::
630
+ \begin{aligned}
631
+ n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn}))
632
+ \end{aligned}
633
+
634
+ This implementation differs on purpose for efficiency.
635
+
636
+ .. include:: ../cudnn_persistent_rnn.rst
637
+
638
+ Examples::
639
+
640
+ >>> # xdoctest: +SKIP
641
+ >>> rnn = nn.GRU(10, 20, 2)
642
+ >>> input = torch.randn(5, 3, 10)
643
+ >>> h0 = torch.randn(2, 3, 20)
644
+ >>> output, hn = rnn(input, h0)
645
+ """
646
+ _FLOAT_MODULE = nn.GRU
647
+
648
+ __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
649
+
650
+ def __init__(self, *args, **kwargs):
651
+ super().__init__('GRU', *args, **kwargs)
652
+
653
+ def _get_name(self):
654
+ return 'DynamicQuantizedGRU'
655
+
656
+ def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
657
+ self.check_input(input, batch_sizes)
658
+ expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
659
+
660
+ self.check_hidden_size(hidden, expected_hidden_size,
661
+ 'Expected hidden size {}, got {}')
662
+
663
+ def forward_impl(
664
+ self, input: Tensor, hx: Optional[Tensor],
665
+ batch_sizes: Optional[Tensor], max_batch_size: int,
666
+ sorted_indices: Optional[Tensor]
667
+ ) -> Tuple[Tensor, Tensor]:
668
+ if hx is None:
669
+ num_directions = 2 if self.bidirectional else 1
670
+ zeros = torch.zeros(self.num_layers * num_directions,
671
+ max_batch_size, self.hidden_size,
672
+ dtype=input.dtype, device=input.device)
673
+ hx = zeros
674
+ else:
675
+ # Each batch of the hidden state should match the input sequence that
676
+ # the user believes he/she is passing in.
677
+ hx = self.permute_hidden(hx, sorted_indices)
678
+
679
+ self.check_forward_args(input, hx, batch_sizes)
680
+
681
+ _all_params = ([m.param for m in self._all_weight_values])
682
+ if batch_sizes is None:
683
+ result = torch.quantized_gru(input,
684
+ hx,
685
+ _all_params,
686
+ self.bias,
687
+ self.num_layers,
688
+ self.dropout,
689
+ self.training,
690
+ self.bidirectional,
691
+ self.batch_first)
692
+ else:
693
+ result = torch.quantized_gru(input,
694
+ batch_sizes,
695
+ hx,
696
+ _all_params,
697
+ self.bias,
698
+ self.num_layers,
699
+ self.dropout,
700
+ self.training,
701
+ self.bidirectional)
702
+ output = result[0]
703
+ hidden = result[1]
704
+
705
+ return output, hidden
706
+
707
+
708
+ @torch.jit.export
709
+ def forward_tensor(
710
+ self, input: Tensor, hx: Optional[Tensor] = None
711
+ ) -> Tuple[Tensor, Tensor]:
712
+ batch_sizes = None
713
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
714
+ sorted_indices = None
715
+ unsorted_indices = None
716
+
717
+ output, hidden = self.forward_impl(
718
+ input, hx, batch_sizes, max_batch_size, sorted_indices)
719
+
720
+ return output, self.permute_hidden(hidden, unsorted_indices)
721
+
722
+ @torch.jit.export
723
+ def forward_packed(
724
+ self, input: PackedSequence, hx: Optional[Tensor] = None
725
+ ) -> Tuple[PackedSequence, Tensor]:
726
+ input_, batch_sizes, sorted_indices, unsorted_indices = input
727
+ max_batch_size = int(batch_sizes[0])
728
+ output_, hidden = self.forward_impl(
729
+ input_, hx, batch_sizes, max_batch_size, sorted_indices
730
+ )
731
+
732
+ output = PackedSequence(output_, batch_sizes,
733
+ sorted_indices, unsorted_indices)
734
+ return output, self.permute_hidden(hidden, unsorted_indices)
735
+
736
+ def permute_hidden(
737
+ self, hx: Tensor, permutation: Optional[Tensor]
738
+ ) -> Tensor:
739
+ if permutation is None:
740
+ return hx
741
+ return _apply_permutation(hx, permutation)
742
+
743
+ @torch.jit.ignore
744
+ def forward(self, input, hx=None):
745
+ if isinstance(input, PackedSequence):
746
+ return self.forward_packed(input, hx)
747
+ else:
748
+ return self.forward_tensor(input, hx)
749
+
750
+ @classmethod
751
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
752
+ return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
753
+
754
+ @classmethod
755
+ def from_reference(cls, ref_mod):
756
+ assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 "
757
+ "exists in LSTM, may need to relax the assumption to support the use case"
758
+ qmod = cls(
759
+ ref_mod.input_size,
760
+ ref_mod.hidden_size,
761
+ ref_mod.num_layers,
762
+ ref_mod.bias,
763
+ ref_mod.batch_first,
764
+ ref_mod.dropout,
765
+ ref_mod.bidirectional,
766
+ # assuming there is layer 0, which should be OK
767
+ ref_mod.weight_ih_l0_dtype,
768
+ )
769
+ qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict())
770
+ return qmod
771
+
772
+ class RNNCellBase(torch.nn.Module):
773
+ # _FLOAT_MODULE = nn.CellRNNBase
774
+ __constants__ = ['input_size', 'hidden_size', 'bias']
775
+
776
+ def __init__(self, input_size, hidden_size, bias=True, num_chunks=4, dtype=torch.qint8):
777
+ super().__init__()
778
+ self.input_size = input_size
779
+ self.hidden_size = hidden_size
780
+ self.bias = bias
781
+ self.weight_dtype = dtype
782
+ if bias:
783
+ self.bias_ih = torch.randn(num_chunks * hidden_size).to(dtype=torch.float)
784
+ self.bias_hh = torch.randn(num_chunks * hidden_size).to(dtype=torch.float)
785
+ else:
786
+ self.register_parameter('bias_ih', None)
787
+ self.register_parameter('bias_hh', None)
788
+
789
+ weight_ih = torch.randn(num_chunks * hidden_size, input_size).to(torch.float)
790
+ weight_hh = torch.randn(num_chunks * hidden_size, hidden_size).to(torch.float)
791
+ if dtype == torch.qint8:
792
+ weight_ih = torch.quantize_per_tensor(weight_ih, scale=1, zero_point=0, dtype=torch.qint8)
793
+ weight_hh = torch.quantize_per_tensor(weight_hh, scale=1, zero_point=0, dtype=torch.qint8)
794
+
795
+ if dtype == torch.qint8:
796
+ # for each layer, for each direction we need to quantize and pack
797
+ # weights and pack parameters in this order:
798
+ #
799
+ # w_ih, w_hh
800
+ packed_weight_ih = \
801
+ torch.ops.quantized.linear_prepack(weight_ih, self.bias_ih)
802
+ packed_weight_hh = \
803
+ torch.ops.quantized.linear_prepack(weight_hh, self.bias_hh)
804
+ else:
805
+ # for each layer, for each direction we need to quantize and pack
806
+ # weights and pack parameters in this order:
807
+ #
808
+ # packed_ih, packed_hh, b_ih, b_hh
809
+ packed_weight_ih = torch.ops.quantized.linear_prepack_fp16(
810
+ weight_ih, self.bias_ih)
811
+ packed_weight_hh = torch.ops.quantized.linear_prepack_fp16(
812
+ weight_hh, self.bias_hh)
813
+
814
+ self._packed_weight_ih = packed_weight_ih
815
+ self._packed_weight_hh = packed_weight_hh
816
+
817
+ def _get_name(self):
818
+ return 'DynamicQuantizedRNNBase'
819
+
820
+ def extra_repr(self):
821
+ s = '{input_size}, {hidden_size}'
822
+ if 'bias' in self.__dict__ and self.bias is not True:
823
+ s += ', bias={bias}'
824
+ if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
825
+ s += ', nonlinearity={nonlinearity}'
826
+ return s.format(**self.__dict__)
827
+
828
+ def check_forward_input(self, input):
829
+ if input.size(1) != self.input_size:
830
+ raise RuntimeError(
831
+ f"input has inconsistent input_size: got {input.size(1)}, expected {self.input_size}")
832
+
833
+ def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
834
+ if input.size(0) != hx.size(0):
835
+ raise RuntimeError(
836
+ f"Input batch size {input.size(0)} doesn't match hidden{hidden_label} batch size {hx.size(0)}")
837
+
838
+ if hx.size(1) != self.hidden_size:
839
+ raise RuntimeError(
840
+ f"hidden{hidden_label} has inconsistent hidden_size: got {hx.size(1)}, expected {self.hidden_size}")
841
+
842
+ @classmethod
843
+ def from_float(cls, mod, use_precomputed_fake_quant=False):
844
+ assert type(mod) in {torch.nn.LSTMCell,
845
+ torch.nn.GRUCell,
846
+ torch.nn.RNNCell}, 'nn.quantized.dynamic.RNNCellBase.from_float \
847
+ only works for nn.LSTMCell, nn.GRUCell and nn.RNNCell'
848
+ assert hasattr(
849
+ mod, 'qconfig'), 'Input float module must have qconfig defined'
850
+
851
+ if mod.qconfig is not None and mod.qconfig.weight is not None:
852
+ weight_observer_method = mod.qconfig.weight
853
+ else:
854
+ # We have the circular import issues if we import the qconfig in the beginning of this file:
855
+ # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
856
+ # import until we need it.
857
+ from torch.ao.quantization.qconfig import default_dynamic_qconfig
858
+ weight_observer_method = default_dynamic_qconfig.weight
859
+
860
+ dtype = weight_observer_method().dtype
861
+ supported_scalar_types = [torch.qint8, torch.float16]
862
+ if dtype not in supported_scalar_types:
863
+ raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}')
864
+
865
+ qRNNCellBase: Union[LSTMCell, GRUCell, RNNCell]
866
+
867
+ if type(mod) == torch.nn.LSTMCell:
868
+ qRNNCellBase = LSTMCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype)
869
+ elif type(mod) == torch.nn.GRUCell:
870
+ qRNNCellBase = GRUCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype)
871
+ elif type(mod) == torch.nn.RNNCell:
872
+ qRNNCellBase = RNNCell(mod.input_size, mod.hidden_size, bias=mod.bias, nonlinearity=mod.nonlinearity, dtype=dtype)
873
+ else:
874
+ raise NotImplementedError('Only LSTMCell, GRUCell and RNNCell \
875
+ are supported for QuantizedRNN for now')
876
+
877
+ assert mod.bias
878
+
879
+ def _observe_and_quantize_weight(weight):
880
+ if dtype == torch.qint8:
881
+ weight_observer = weight_observer_method()
882
+ weight_observer(weight)
883
+ qweight = _quantize_weight(weight.float(), weight_observer)
884
+ return qweight
885
+ else:
886
+ return weight.float()
887
+
888
+ qRNNCellBase._packed_weight_ih = pack_weight_bias(_observe_and_quantize_weight(mod.weight_ih), mod.bias_ih, dtype)
889
+ qRNNCellBase._packed_weight_hh = pack_weight_bias(_observe_and_quantize_weight(mod.weight_hh), mod.bias_hh, dtype)
890
+ return qRNNCellBase
891
+
892
+ @classmethod
893
+ def from_reference(cls, ref_mod):
894
+ assert hasattr(ref_mod, "weight_ih_dtype"), "We are assuming weight_ih "
895
+ "exists in reference module, may need to relax the assumption to support the use case"
896
+ if hasattr(ref_mod, "nonlinearity"):
897
+ qmod = cls(
898
+ ref_mod.input_size,
899
+ ref_mod.hidden_size,
900
+ ref_mod.bias,
901
+ ref_mod.nonlinearity,
902
+ dtype=ref_mod.weight_ih_dtype
903
+ )
904
+ else:
905
+ qmod = cls(
906
+ ref_mod.input_size,
907
+ ref_mod.hidden_size,
908
+ ref_mod.bias,
909
+ dtype=ref_mod.weight_ih_dtype
910
+ )
911
+ weight_bias_dict = {
912
+ "weight": {
913
+ "weight_ih": ref_mod.get_quantized_weight_ih(),
914
+ "weight_hh": ref_mod.get_quantized_weight_hh(),
915
+ },
916
+ "bias": {
917
+ "bias_ih": ref_mod.bias_ih,
918
+ "bias_hh": ref_mod.bias_hh,
919
+ }
920
+ }
921
+ qmod.set_weight_bias(weight_bias_dict)
922
+ return qmod
923
+
924
+ def _weight_bias(self):
925
+ # Returns a dict of weights and biases
926
+ weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}}
927
+ w1, b1 = self._packed_weight_ih.__getstate__()[0]
928
+ w2, b2 = self._packed_weight_hh.__getstate__()[0]
929
+ # TODO: these can be simplified to one level? e.g. using weight_ih as key
930
+ # directly
931
+ weight_bias_dict['weight']['weight_ih'] = w1
932
+ weight_bias_dict['weight']['weight_hh'] = w2
933
+ weight_bias_dict['bias']['bias_ih'] = b1
934
+ weight_bias_dict['bias']['bias_hh'] = b2
935
+ return weight_bias_dict
936
+
937
+ def get_weight(self):
938
+ return self._weight_bias()['weight']
939
+
940
    def get_bias(self):
        """Return the ``bias_ih`` / ``bias_hh`` tensors as a dict."""
        return self._weight_bias()['bias']
942
+
943
    def set_weight_bias(self, weight_bias_dict):
        """Repack weights/biases from a dict shaped like ``_weight_bias()``'s output."""
        # TODO: these can be simplified to one level? e.g. using weight_ih as key
        # directly
        self._packed_weight_ih = pack_weight_bias(
            weight_bias_dict["weight"]["weight_ih"],
            weight_bias_dict["bias"]["bias_ih"],
            self.weight_dtype)
        self._packed_weight_hh = pack_weight_bias(
            weight_bias_dict["weight"]["weight_hh"],
            weight_bias_dict["bias"]["bias_hh"],
            self.weight_dtype)
954
+
955
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """Serialize the packed weights alongside the base module's state."""
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + '_packed_weight_ih'] = self._packed_weight_ih
        destination[prefix + '_packed_weight_hh'] = self._packed_weight_hh
959
+
960
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Restore the packed weights, then delegate the rest to the base class."""
        self._packed_weight_ih = state_dict.pop(prefix + '_packed_weight_ih')
        self._packed_weight_hh = state_dict.pop(prefix + '_packed_weight_hh')
        # strict is forced to False: the packed-weight entries were popped above,
        # so the caller's strictness flag is deliberately not forwarded.
        super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                      missing_keys, unexpected_keys, error_msgs)
966
+
967
+
968
class RNNCell(RNNCellBase):
    r"""An Elman RNN cell with tanh or ReLU non-linearity.
    A dynamic quantized RNNCell module with floating point tensor as inputs and outputs.
    Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.RNNCell`,
    please see https://pytorch.org/docs/stable/nn.html#torch.nn.RNNCell for documentation.

    Examples::

        >>> # xdoctest: +SKIP
        >>> rnn = nn.RNNCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx = rnn(input[i], hx)
        ...     output.append(hx)
    """
    __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']

    def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh", dtype=torch.qint8):
        # num_chunks=1: a plain RNN cell has a single gate worth of weights.
        super().__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype)
        self.nonlinearity = nonlinearity

    def _get_name(self):
        return 'DynamicQuantizedRNNCell'

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        self.check_forward_input(input)
        if hx is None:
            # Default hidden state: zeros matching the input batch/dtype/device.
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        self.check_forward_hidden(input, hx, '')
        # Dispatch to the dynamic quantized kernel matching the nonlinearity.
        if self.nonlinearity == "tanh":
            ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic(
                input, hx,
                self._packed_weight_ih, self._packed_weight_hh,
                self.bias_ih, self.bias_hh)
        elif self.nonlinearity == "relu":
            ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic(
                input, hx,
                self._packed_weight_ih, self._packed_weight_hh,
                self.bias_ih, self.bias_hh)
        else:
            ret = input  # TODO: remove when jit supports exception flow
            raise RuntimeError(
                f"Unknown nonlinearity: {self.nonlinearity}")
        return ret

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
1018
+
1019
+
1020
class LSTMCell(RNNCellBase):
    r"""A long short-term memory (LSTM) cell.

    A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs.
    Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.LSTMCell`,
    please see https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell for documentation.

    Examples::

        >>> # xdoctest: +SKIP
        >>> rnn = nn.LSTMCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx, cx = rnn(input[i], (hx, cx))
        ...     output.append(hx)
    """

    def __init__(self, *args, **kwargs):
        # num_chunks=4: LSTM packs input/forget/cell/output gates together.
        super().__init__(*args, num_chunks=4, **kwargs)  # type: ignore[misc]

    def _get_name(self):
        return 'DynamicQuantizedLSTMCell'

    def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        self.check_forward_input(input)
        if hx is None:
            # Default (h, c) state: a shared zeros tensor is safe because the
            # kernel does not mutate its inputs in place.
            zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        return torch.ops.quantized.quantized_lstm_cell_dynamic(
            input, hx,
            self._packed_weight_ih, self._packed_weight_hh,
            self.bias_ih, self.bias_hh)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
1061
+
1062
+
1063
class GRUCell(RNNCellBase):
    r"""A gated recurrent unit (GRU) cell

    A dynamic quantized GRUCell module with floating point tensor as inputs and outputs.
    Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.GRUCell`,
    please see https://pytorch.org/docs/stable/nn.html#torch.nn.GRUCell for documentation.

    Examples::

        >>> # xdoctest: +SKIP
        >>> rnn = nn.GRUCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx = rnn(input[i], hx)
        ...     output.append(hx)
    """

    def __init__(self, input_size, hidden_size, bias=True, dtype=torch.qint8):
        # num_chunks=3: GRU packs reset/update/new gates into one weight matrix.
        super().__init__(input_size, hidden_size, bias, num_chunks=3, dtype=dtype)

    def _get_name(self):
        return 'DynamicQuantizedGRUCell'

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        self.check_forward_input(input)
        if hx is None:
            # Default hidden state: zeros matching the input batch/dtype/device.
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        self.check_forward_hidden(input, hx, '')
        return torch.ops.quantized.quantized_gru_cell_dynamic(
            input, hx,
            self._packed_weight_ih, self._packed_weight_hh,
            self.bias_ih, self.bias_hh,
        )

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.73 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc ADDED
Binary file (8.53 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (2.06 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.7 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .modules import * # noqa: F403
2
+
3
+ __all__ = [
4
+ 'Linear',
5
+ 'Conv1d',
6
+ 'Conv2d',
7
+ 'Conv3d',
8
+ 'ConvTranspose1d',
9
+ 'ConvTranspose2d',
10
+ 'ConvTranspose3d',
11
+ 'RNNCell',
12
+ 'LSTMCell',
13
+ 'GRUCell',
14
+ 'LSTM',
15
+ 'GRU',
16
+ 'Embedding',
17
+ 'EmbeddingBag',
18
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (375 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .linear import Linear
2
+ from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
3
+ from .rnn import RNNCell, LSTMCell, GRUCell, LSTM, GRU
4
+ from .sparse import Embedding, EmbeddingBag
5
+
6
+ __all__ = [
7
+ 'Linear',
8
+ 'Conv1d',
9
+ 'Conv2d',
10
+ 'Conv3d',
11
+ 'ConvTranspose1d',
12
+ 'ConvTranspose2d',
13
+ 'ConvTranspose3d',
14
+ 'RNNCell',
15
+ 'LSTMCell',
16
+ 'GRUCell',
17
+ 'LSTM',
18
+ 'GRU',
19
+ 'Embedding',
20
+ 'EmbeddingBag',
21
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from typing import Optional, Dict, Any, List
6
+ from torch.nn.common_types import _size_1_t
7
+ from .utils import ReferenceQuantizedModule
8
+
9
+ __all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']
10
+
11
class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule):
    """ A reference version of nn.quantized.Conv2d
    we will not pack the parameters in this module, since weight packing is an
    optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
    this is useful when user want to use this module in other backends like Glow.
    """
    # bias may be absent, hence Optional for TorchScript.
    __annotations__ = {"bias": Optional[torch.Tensor]}
    _IS_REFERENCE = True

    @staticmethod
    def from_float(cls, float_conv, weight_qparams):
        # A staticmethod that takes `cls` explicitly so each concrete subclass
        # (Conv1d/2d/3d) can forward its own class object from its classmethod.
        qref_conv = cls(
            float_conv.in_channels,
            float_conv.out_channels,
            float_conv.kernel_size,  # type: ignore[arg-type]
            float_conv.stride,  # type: ignore[arg-type]
            float_conv.padding,  # type: ignore[arg-type]
            float_conv.dilation,  # type: ignore[arg-type]
            float_conv.groups,
            float_conv.bias is not None,  # type: ignore[arg-type]
            float_conv.padding_mode,
            device=float_conv.weight.device,
            dtype=float_conv.weight.dtype,
            weight_qparams=weight_qparams)
        # Detach so the reference module does not share autograd history
        # with the source float conv.
        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
        if float_conv.bias is not None:
            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
        return qref_conv
39
+
40
class Conv1d(_ConvNd, nn.Conv1d):
    """Reference quantized Conv1d: stores a float weight plus quantization
    parameters; forward runs weight quant-dequant followed by a float conv."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: _size_1_t,
                 stride: _size_1_t = 1,
                 padding: _size_1_t = 0,
                 dilation: _size_1_t = 1,
                 groups: int = 1,
                 bias: bool = True,
                 padding_mode: str = "zeros",
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float Conv1d, then attach weight qparams.
        nn.Conv1d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, bias, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.conv1d ---

        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.conv1d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv1d
        """
        weight_quant_dequant = self.get_weight()
        result = F.conv1d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
        return result

    def _get_name(self):
        return "QuantizedConv1d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvNd.from_float(cls, float_conv, weight_qparams)
82
+
83
class Conv2d(_ConvNd, nn.Conv2d):
    """Reference quantized Conv2d: stores a float weight plus quantization
    parameters; forward runs weight quant-dequant followed by a float conv."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros',
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float Conv2d, then attach weight qparams.
        nn.Conv2d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, bias, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.conv2d ---

        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.conv2d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv2d
        """
        weight_quant_dequant = self.get_weight()
        result = F.conv2d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
        return result

    def _get_name(self):
        return "QuantizedConv2d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvNd.from_float(cls, float_conv, weight_qparams)
118
+
119
class Conv3d(_ConvNd, nn.Conv3d):
    """Reference quantized Conv3d: stores a float weight plus quantization
    parameters; forward runs weight quant-dequant followed by a float conv."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode="zeros",
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float Conv3d, then attach weight qparams.
        nn.Conv3d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, bias, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.conv3d ---

        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.conv3d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        """
        weight_quant_dequant = self.get_weight()
        result = F.conv3d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, self.dilation, self.groups)
        return result

    def _get_name(self):
        return "QuantizedConv3d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvNd.from_float(cls, float_conv, weight_qparams)
154
+
155
class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd):
    """ A reference version of nn.quantized.ConvTranspose2d
    we will not pack the parameters in this module, since weight packing is an
    optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
    this is useful when user want to use this module in other backends like Glow.
    """
    @staticmethod
    def from_float(cls, float_conv, weight_qparams):
        # Same shape as _ConvNd.from_float, but the transpose-conv ctor takes
        # output_padding and places dilation after bias.
        qref_conv = cls(
            float_conv.in_channels,
            float_conv.out_channels,
            float_conv.kernel_size,  # type: ignore[arg-type]
            float_conv.stride,  # type: ignore[arg-type]
            float_conv.padding,  # type: ignore[arg-type]
            float_conv.output_padding,  # type: ignore[arg-type]
            float_conv.groups,
            float_conv.bias is not None,  # type: ignore[arg-type]
            float_conv.dilation,  # type: ignore[arg-type]
            float_conv.padding_mode,
            device=float_conv.weight.device,
            dtype=float_conv.weight.dtype,
            weight_qparams=weight_qparams)
        # Detach so the reference module does not share autograd history
        # with the source float conv.
        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
        if float_conv.bias is not None:
            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
        return qref_conv
181
+
182
+
183
class ConvTranspose1d(_ConvTransposeNd, nn.ConvTranspose1d):
    """Reference quantized ConvTranspose1d: stores a float weight plus
    quantization parameters; forward runs weight quant-dequant followed by a
    float transposed convolution."""
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: _size_1_t,
                 stride: _size_1_t = 1,
                 padding: _size_1_t = 0,
                 output_padding: _size_1_t = 0,
                 groups: int = 1,
                 bias: bool = True,
                 dilation: _size_1_t = 1,
                 padding_mode: str = "zeros",
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float ConvTranspose1d, then attach qparams.
        nn.ConvTranspose1d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
            groups, bias, dilation, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.convTranspose1d ---
        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.convTranspose1d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv1d
        """

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        # Bug fix: compute output_padding from the actual input tensor `x`;
        # the original passed the `input` builtin, which is not a tensor.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]

        weight_quant_dequant = self.get_weight()
        result = F.conv_transpose1d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, output_padding, self.groups, self.dilation)
        return result

    def _get_name(self):
        return "QuantizedConvTranspose1d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
232
+
233
class ConvTranspose2d(_ConvTransposeNd, nn.ConvTranspose2d):
    """Reference quantized ConvTranspose2d: stores a float weight plus
    quantization parameters; forward runs weight quant-dequant followed by a
    float transposed convolution."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0,
                 groups=1, bias=True, dilation=1,
                 padding_mode='zeros',
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float ConvTranspose2d, then attach qparams.
        nn.ConvTranspose2d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
            groups, bias, dilation, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.convTranspose2d ---
        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv2d
        """
        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        # Bug fix: compute output_padding from the actual input tensor `x`;
        # the original passed the `input` builtin, which is not a tensor.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]

        weight_quant_dequant = self.get_weight()
        result = F.conv_transpose2d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, output_padding, self.groups, self.dilation)

        return result

    def _get_name(self):
        return "QuantizedConvTranspose2d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
277
+
278
class ConvTranspose3d(_ConvTransposeNd, nn.ConvTranspose3d):
    """Reference quantized ConvTranspose3d: stores a float weight plus
    quantization parameters; forward runs weight quant-dequant followed by a
    float transposed convolution."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0,
                 groups=1, bias=True, dilation=1,
                 padding_mode="zeros",
                 device=None,
                 dtype=None,
                 weight_qparams: Optional[Dict[str, Any]] = None):
        # Initialize as a regular float ConvTranspose3d, then attach qparams.
        nn.ConvTranspose3d.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
            groups, bias, dilation, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.convTranspose3d ---
        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant -- *F.convTranspose3d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        """

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        # Bug fix: compute output_padding from the actual input tensor `x`;
        # the original passed the `input` builtin, which is not a tensor.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]

        weight_quant_dequant = self.get_weight()
        result = F.conv_transpose3d(
            x, weight_quant_dequant, self.bias, self.stride,
            self.padding, output_padding, self.groups, self.dilation)
        return result

    def _get_name(self):
        return "QuantizedConvTranspose3d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py ADDED
@@ -0,0 +1,615 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch import Tensor
5
+ from .utils import _quantize_and_dequantize_weight
6
+ from .utils import _quantize_weight
7
+ from typing import Optional, Dict, Any, Tuple
8
+ from torch import _VF
9
+ from torch.nn.utils.rnn import PackedSequence
10
+
11
+ __all__ = ['RNNCellBase', 'RNNCell', 'LSTMCell', 'GRUCell', 'RNNBase', 'LSTM', 'GRU', 'get_quantized_weight']
12
+
13
+ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
14
+ return tensor.index_select(dim, permutation)
15
+
16
+ def _get_weight_and_quantization_params(module, wn):
17
+ weight = getattr(module, wn)
18
+ params = [weight]
19
+ for param_name in [wn + n for n in ["_qscheme", "_dtype", "_scale", "_zero_point", "_axis_int"]]:
20
+ if hasattr(module, param_name):
21
+ param = getattr(module, param_name)
22
+ else:
23
+ param = None
24
+ params.append(param)
25
+ return params
26
+
27
def get_quantized_weight(module, wn):
    """Quantize and return the weight named ``wn`` on ``module``.

    Returns None when the module has no such attribute.
    """
    if not hasattr(module, wn):
        return None
    return _quantize_weight(*_get_weight_and_quantization_params(module, wn))
33
+
34
def _get_quantize_and_dequantized_weight(module, wn):
    """Quantize-then-dequantize the weight named ``wn`` on ``module``.

    Returns None when the module has no such attribute.
    """
    if not hasattr(module, wn):
        return None
    return _quantize_and_dequantize_weight(*_get_weight_and_quantization_params(module, wn))
40
+
41
class RNNCellBase(nn.RNNCellBase):
    """Reference quantized RNN cell base: keeps float weights together with
    per-weight quantization parameters registered as buffers."""
    def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int,
                 device=None, dtype=None, weight_qparams_dict=None) -> None:
        super().__init__(input_size, hidden_size, bias, num_chunks, device=device, dtype=dtype)
        # TODO(jerryzh168): maybe make this arg a required arg
        if weight_qparams_dict is None:
            # Default: per-tensor affine quint8 qparams shared by both weights.
            weight_qparams = {
                "qscheme": torch.per_tensor_affine,
                "dtype": torch.quint8,
                "scale": 1.0,
                "zero_point": 0
            }
            weight_qparams_dict = {
                "weight_ih": weight_qparams,
                "weight_hh": weight_qparams,
                "is_decomposed": False,
            }
        # Exactly weight_ih, weight_hh and the is_decomposed flag are expected.
        assert len(weight_qparams_dict) == 3, "Expected length for weight_qparams_dict to be 3 for QuantizedRNNCellBase(Reference)"
        self._init_weight_qparams_dict(weight_qparams_dict, device)

    def _init_weight_qparams_dict(self, weight_qparams_dict, device):
        """Register qscheme/dtype attributes and scale/zero_point/axis buffers
        for every weight entry in ``weight_qparams_dict``."""
        assert weight_qparams_dict is not None
        self.is_decomposed = weight_qparams_dict["is_decomposed"]
        for key, weight_qparams in weight_qparams_dict.items():
            if key == "is_decomposed":
                continue
            # TODO: refactor the duplicated code to utils.py
            weight_qscheme = weight_qparams["qscheme"]
            weight_dtype = weight_qparams["dtype"]
            setattr(self, key + "_qscheme", weight_qscheme)
            setattr(self, key + "_dtype", weight_dtype)
            assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \
                Exception(f"qscheme: {weight_qscheme} is not support in {self._get_name()}")
            if weight_qscheme is not None:
                # Scale/zero_point may arrive as Python scalars or tensors;
                # normalize to tensors so they can be registered as buffers.
                scale = weight_qparams["scale"]
                scale_tensor = scale.clone().detach() \
                    if isinstance(scale, torch.Tensor) else \
                    torch.tensor(scale, dtype=torch.float, device=device)
                self.register_buffer(key + "_scale", scale_tensor)
                zp = weight_qparams["zero_point"]
                zp_tensor = zp.clone().detach() \
                    if isinstance(zp, torch.Tensor) else \
                    torch.tensor(zp, dtype=torch.int, device=device)
                self.register_buffer(key + "_zero_point", zp_tensor)
                if weight_qscheme == torch.per_channel_affine:
                    axis = weight_qparams["axis"]
                    axis_tensor = axis.clone().detach() \
                        if isinstance(axis, torch.Tensor) else \
                        torch.tensor(axis, dtype=torch.int, device=device)
                    self.register_buffer(key + "_axis", axis_tensor)
                else:
                    # added for TorchScriptability, not used
                    self.register_buffer(
                        key + "_axis", torch.tensor(0, dtype=torch.int, device=device))
                setattr(self, key + "_axis_int", getattr(self, key + "_axis").item())

    def _get_name(self):
        return "QuantizedRNNCellBase(Reference)"

    def get_quantized_weight_ih(self):
        """Return weight_ih as a quantized tensor."""
        return get_quantized_weight(self, "weight_ih")

    def get_quantized_weight_hh(self):
        """Return weight_hh as a quantized tensor."""
        return get_quantized_weight(self, "weight_hh")

    def get_weight_ih(self):
        """Return weight_ih after a quantize-dequantize round trip."""
        return _get_quantize_and_dequantized_weight(self, "weight_ih")

    def get_weight_hh(self):
        """Return weight_hh after a quantize-dequantize round trip."""
        return _get_quantize_and_dequantized_weight(self, "weight_hh")
111
+
112
class RNNCell(RNNCellBase):
    """
    We'll store weight_qparams for all the weights (weight_ih and weight_hh),
    we need to pass in a `weight_qparams_dict` that maps from weight name,
    e.g. weight_ih, to the weight_qparams for that weight
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh",
                 device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
        super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs)
        self.nonlinearity = nonlinearity

    def _get_name(self):
        return "QuantizedRNNCell(Reference)"

    # TODO: refactor nn.RNNCell to have a _forward that takes weight_ih and weight_hh as input
    # and remove duplicated code, same for the other two Cell modules
    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        assert input.dim() in (1, 2), \
            f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
        # Unbatched inputs are promoted to batch size 1 and squeezed on return.
        is_batched = input.dim() == 2
        if not is_batched:
            input = input.unsqueeze(0)

        if hx is None:
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        else:
            hx = hx.unsqueeze(0) if not is_batched else hx

        # Run the float cell kernel with quant-dequant'ed weights.
        if self.nonlinearity == "tanh":
            ret = _VF.rnn_tanh_cell(
                input, hx,
                self.get_weight_ih(), self.get_weight_hh(),
                self.bias_ih, self.bias_hh,
            )
        elif self.nonlinearity == "relu":
            ret = _VF.rnn_relu_cell(
                input, hx,
                self.get_weight_ih(), self.get_weight_hh(),
                self.bias_ih, self.bias_hh,
            )
        else:
            ret = input  # TODO: remove when jit supports exception flow
            raise RuntimeError(
                f"Unknown nonlinearity: {self.nonlinearity}")

        if not is_batched:
            ret = ret.squeeze(0)

        return ret

    @classmethod
    def from_float(cls, mod, weight_qparams_dict):
        ref_mod = cls(
            mod.input_size,
            mod.hidden_size,
            mod.bias,
            mod.nonlinearity,
            mod.weight_ih.device,
            mod.weight_ih.dtype,
            weight_qparams_dict)
        # Parameters are shared (not copied) with the source float module.
        ref_mod.weight_ih = mod.weight_ih
        ref_mod.weight_hh = mod.weight_hh
        ref_mod.bias_ih = mod.bias_ih
        ref_mod.bias_hh = mod.bias_hh
        return ref_mod
178
+
179
class LSTMCell(RNNCellBase):
    """
    We'll store weight_qparams for all the weights (weight_ih and weight_hh),
    we need to pass in a `weight_qparams_dict` that maps from weight name,
    e.g. weight_ih, to the weight_qparams for that weight
    """
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
                 device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
        super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs)

    def _get_name(self):
        return "QuantizedLSTMCell(Reference)"

    def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        assert input.dim() in (1, 2), \
            f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
        # Unbatched inputs are promoted to batch size 1 and squeezed on return.
        is_batched = input.dim() == 2
        if not is_batched:
            input = input.unsqueeze(0)

        if hx is None:
            zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        else:
            hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx

        # Run the float LSTM cell kernel with quant-dequant'ed weights.
        ret = _VF.lstm_cell(
            input, hx,
            self.get_weight_ih(), self.get_weight_hh(),
            self.bias_ih, self.bias_hh,
        )

        if not is_batched:
            ret = (ret[0].squeeze(0), ret[1].squeeze(0))
        return ret

    @classmethod
    def from_float(cls, mod, weight_qparams_dict, use_precomputed_fake_quant=False):
        # NOTE(review): use_precomputed_fake_quant is accepted for interface
        # compatibility but not used here — presumably intentional; confirm.
        ref_mod = cls(
            mod.input_size,
            mod.hidden_size,
            mod.bias,
            mod.weight_ih.device,
            mod.weight_ih.dtype,
            weight_qparams_dict)
        # Parameters are shared (not copied) with the source float module.
        ref_mod.weight_ih = mod.weight_ih
        ref_mod.weight_hh = mod.weight_hh
        ref_mod.bias_ih = mod.bias_ih
        ref_mod.bias_hh = mod.bias_hh
        return ref_mod
230
+
231
+ class GRUCell(RNNCellBase):
232
+ """
233
+ We'll store weight_qparams for all the weights (weight_ih and weight_hh),
234
+ we need to pass in a `weight_qparams_dict` that maps from weight name,
235
+ e.g. weight_ih, to the weight_qparams for that weight
236
+ """
237
+ def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
238
+ device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None:
239
+ factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
240
+ super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)
241
+
242
+ def _get_name(self):
243
+ return "QuantizedGRUCell(Reference)"
244
+
245
+ def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
246
+ assert input.dim() in (1, 2), \
247
+ f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
248
+ is_batched = input.dim() == 2
249
+ if not is_batched:
250
+ input = input.unsqueeze(0)
251
+
252
+ if hx is None:
253
+ hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
254
+ else:
255
+ hx = hx.unsqueeze(0) if not is_batched else hx
256
+
257
+ ret = _VF.gru_cell(
258
+ input, hx,
259
+ self.get_weight_ih(), self.get_weight_hh(),
260
+ self.bias_ih, self.bias_hh,
261
+ )
262
+
263
+ if not is_batched:
264
+ ret = ret.squeeze(0)
265
+
266
+ return ret
267
+
268
+ @classmethod
269
+ def from_float(cls, mod, weight_qparams_dict):
270
+ ref_mod = cls(
271
+ mod.input_size,
272
+ mod.hidden_size,
273
+ mod.bias,
274
+ mod.weight_ih.device,
275
+ mod.weight_ih.dtype,
276
+ weight_qparams_dict)
277
+ ref_mod.weight_ih = mod.weight_ih
278
+ ref_mod.weight_hh = mod.weight_hh
279
+ ref_mod.bias_ih = mod.bias_ih
280
+ ref_mod.bias_hh = mod.bias_hh
281
+ return ref_mod
282
+
283
+ class RNNBase(nn.RNNBase):
284
+ def __init__(self, mode: str, input_size: int, hidden_size: int,
285
+ num_layers: int = 1, bias: bool = True, batch_first: bool = False,
286
+ dropout: float = 0., bidirectional: bool = False, proj_size: int = 0,
287
+ device=None, dtype=None,
288
+ weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None:
289
+ super().__init__(
290
+ mode, input_size, hidden_size, num_layers, bias, batch_first, dropout,
291
+ bidirectional, proj_size, device, dtype
292
+ )
293
+ # TODO(jerryzh168): maybe make this arg a required arg
294
+ if weight_qparams_dict is None:
295
+ weight_qparams = {
296
+ 'qscheme': torch.per_tensor_affine,
297
+ 'dtype': torch.quint8,
298
+ 'scale': 1.0,
299
+ 'zero_point': 0
300
+ }
301
+ weight_qparams_dict = {"is_decomposed": False} # type: ignore[dict-item]
302
+ for wn in self._flat_weights_names:
303
+ if wn.startswith("weight"):
304
+ weight_qparams_dict[wn] = weight_qparams
305
+ self._init_weight_qparams_dict(weight_qparams_dict, device)
306
+
307
+ def _init_weight_qparams_dict(self, weight_qparams_dict, device):
308
+ self.is_decomposed = weight_qparams_dict["is_decomposed"]
309
+ for key, weight_qparams in weight_qparams_dict.items():
310
+ if key == "is_decomposed":
311
+ continue
312
+ weight_qscheme = weight_qparams["qscheme"]
313
+ weight_dtype = weight_qparams["dtype"]
314
+ setattr(self, key + "_qscheme", weight_qscheme)
315
+ setattr(self, key + "_dtype", weight_dtype)
316
+ assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \
317
+ Exception(f"qscheme: {weight_qscheme} is not support in {self._get_name()}")
318
+ if weight_qscheme is not None:
319
+ self.register_buffer(
320
+ key + "_scale",
321
+ torch.tensor(weight_qparams["scale"], dtype=torch.float, device=device))
322
+ self.register_buffer(
323
+ key + "_zero_point",
324
+ torch.tensor(weight_qparams["zero_point"], dtype=torch.int, device=device))
325
+ if weight_qscheme == torch.per_channel_affine:
326
+ self.register_buffer(
327
+ key + "_axis",
328
+ torch.tensor(weight_qparams["axis"], dtype=torch.int, device=device))
329
+ else:
330
+ # added for TorchScriptability, not used
331
+ self.register_buffer(
332
+ key + "_axis", torch.tensor(0, dtype=torch.int, device=device))
333
+ setattr(self, key + "_axis_int", getattr(self, key + "_axis").item())
334
+
335
+ class LSTM(RNNBase):
336
+ """ Reference Quantized LSTM Module
337
+ We'll store weight_qparams for all the weights in _flat_weights, we need to pass in
338
+ a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0,
339
+ to the weight_qparams for that weight
340
+ """
341
+ def __init__(self, *args, **kwargs):
342
+ super().__init__('LSTM', *args, **kwargs)
343
+
344
+ # Same as above, see torch/nn/modules/module.py::_forward_unimplemented
345
+ def permute_hidden(self, # type: ignore[override]
346
+ hx: Tuple[Tensor, Tensor],
347
+ permutation: Optional[Tensor]
348
+ ) -> Tuple[Tensor, Tensor]:
349
+ if permutation is None:
350
+ return hx
351
+ return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation)
352
+
353
+ def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
354
+ if batch_sizes is not None:
355
+ mini_batch = int(batch_sizes[0])
356
+ else:
357
+ mini_batch = input.size(0) if self.batch_first else input.size(1)
358
+ num_directions = 2 if self.bidirectional else 1
359
+ expected_hidden_size = (self.num_layers * num_directions,
360
+ mini_batch, self.hidden_size)
361
+ return expected_hidden_size
362
+
363
+ # In the future, we should prevent mypy from applying contravariance rules here.
364
+ # See torch/nn/modules/module.py::_forward_unimplemented
365
+ def check_forward_args(self, # type: ignore[override]
366
+ input: Tensor,
367
+ hidden: Tuple[Tensor, Tensor],
368
+ batch_sizes: Optional[Tensor],
369
+ ):
370
+ self.check_input(input, batch_sizes)
371
+ self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
372
+ 'Expected hidden[0] size {}, got {}')
373
+ self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes),
374
+ 'Expected hidden[1] size {}, got {}')
375
+
376
+ def get_quantized_weight_bias_dict(self):
377
+ """ dictionary from flat_weight_name to quantized weight or (unquantized) bias
378
+ e.g.
379
+ {
380
+ "weight_ih_l0": quantized_weight,
381
+ "bias_ih_l0": unquantized_bias,
382
+ ...
383
+ }
384
+ """
385
+ quantized_weight_bias_dict = {}
386
+ for wn in self._flat_weights_names:
387
+ if hasattr(self, wn):
388
+ if wn.startswith("weight"):
389
+ weight_or_bias = get_quantized_weight(self, wn)
390
+ else:
391
+ weight_or_bias = getattr(self, wn)
392
+ else:
393
+ weight_or_bias = None
394
+ quantized_weight_bias_dict[wn] = weight_or_bias
395
+ return quantized_weight_bias_dict
396
+
397
+ def get_flat_weights(self):
398
+ flat_weights = []
399
+ for wn in self._flat_weights_names:
400
+ if hasattr(self, wn):
401
+ weight = getattr(self, wn)
402
+ if wn.startswith("weight"):
403
+ params = _get_weight_and_quantization_params(self, wn)
404
+ weight = _quantize_and_dequantize_weight(*params)
405
+ else:
406
+ weight = None
407
+ flat_weights.append(weight)
408
+ return flat_weights
409
+
410
+ def forward(self, input, hx=None): # noqa: F811
411
+ orig_input = input
412
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
413
+ batch_sizes = None
414
+ if isinstance(orig_input, PackedSequence):
415
+ input, batch_sizes, sorted_indices, unsorted_indices = input
416
+ max_batch_size = int(batch_sizes[0])
417
+ else:
418
+ batch_sizes = None
419
+ is_batched = input.dim() == 3
420
+ batch_dim = 0 if self.batch_first else 1
421
+ if not is_batched:
422
+ input = input.unsqueeze(batch_dim)
423
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
424
+ sorted_indices = None
425
+ unsorted_indices = None
426
+
427
+ if hx is None:
428
+ num_directions = 2 if self.bidirectional else 1
429
+ real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
430
+ h_zeros = torch.zeros(self.num_layers * num_directions,
431
+ max_batch_size, real_hidden_size,
432
+ dtype=input.dtype, device=input.device)
433
+ c_zeros = torch.zeros(self.num_layers * num_directions,
434
+ max_batch_size, self.hidden_size,
435
+ dtype=input.dtype, device=input.device)
436
+ hx = (h_zeros, c_zeros)
437
+ else:
438
+ if batch_sizes is None: # If not PackedSequence input.
439
+ if is_batched: # type: ignore[possibly-undefined]
440
+ if (hx[0].dim() != 3 or hx[1].dim() != 3):
441
+ msg = ("For batched 3-D input, hx and cx should "
442
+ f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
443
+ raise RuntimeError(msg)
444
+ else:
445
+ if hx[0].dim() != 2 or hx[1].dim() != 2:
446
+ msg = ("For unbatched 2-D input, hx and cx should "
447
+ f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
448
+ raise RuntimeError(msg)
449
+ hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
450
+
451
+ # Each batch of the hidden state should match the input sequence that
452
+ # the user believes he/she is passing in.
453
+ hx = self.permute_hidden(hx, sorted_indices)
454
+
455
+ self.check_forward_args(input, hx, batch_sizes)
456
+ if batch_sizes is None:
457
+ result = _VF.lstm(input, hx, self.get_flat_weights(), self.bias, self.num_layers,
458
+ self.dropout, self.training, self.bidirectional, self.batch_first)
459
+ else:
460
+ result = _VF.lstm(input, batch_sizes, hx, self.get_flat_weights(), self.bias,
461
+ self.num_layers, self.dropout, self.training, self.bidirectional)
462
+ output = result[0]
463
+ hidden = result[1:]
464
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
465
+ if isinstance(orig_input, PackedSequence):
466
+ output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
467
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
468
+ else:
469
+ if not is_batched: # type: ignore[possibly-undefined]
470
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
471
+ hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
472
+ return output, self.permute_hidden(hidden, unsorted_indices)
473
+
474
+ def _get_name(self):
475
+ return "QuantizedLSTM(Reference)"
476
+
477
+ @classmethod
478
+ def from_float(cls, mod, weight_qparams_dict):
479
+ ref_mod = cls(
480
+ mod.input_size,
481
+ mod.hidden_size,
482
+ mod.num_layers,
483
+ mod.bias,
484
+ mod.batch_first,
485
+ mod.dropout,
486
+ mod.bidirectional,
487
+ weight_qparams_dict=weight_qparams_dict)
488
+ for wn in mod._flat_weights_names:
489
+ setattr(ref_mod, wn, getattr(mod, wn))
490
+ return ref_mod
491
+
492
+ class GRU(RNNBase):
493
+ """ Reference Quantized GRU Module
494
+ We'll store weight_qparams for all the weights in _flat_weights, we need to pass in
495
+ a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0,
496
+ to the weight_qparams for that weight
497
+ """
498
+ def __init__(self, *args, **kwargs):
499
+ if 'proj_size' in kwargs:
500
+ raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU")
501
+ super().__init__('GRU', *args, **kwargs)
502
+
503
+ def get_quantized_weight_bias_dict(self):
504
+ """ dictionary from flat_weight_name to quantized weight or (unquantized) bias
505
+ e.g.
506
+ {
507
+ "weight_ih_l0": quantized_weight,
508
+ "bias_ih_l0": unquantized_bias,
509
+ ...
510
+ }
511
+ """
512
+ quantized_weight_bias_dict = {}
513
+ for wn in self._flat_weights_names:
514
+ if hasattr(self, wn):
515
+ if wn.startswith("weight"):
516
+ weight_or_bias = get_quantized_weight(self, wn)
517
+ else:
518
+ weight_or_bias = getattr(self, wn)
519
+ else:
520
+ weight_or_bias = None
521
+ quantized_weight_bias_dict[wn] = weight_or_bias
522
+ return quantized_weight_bias_dict
523
+
524
+ def get_flat_weights(self):
525
+ flat_weights = []
526
+ for wn in self._flat_weights_names:
527
+ if hasattr(self, wn):
528
+ weight = getattr(self, wn)
529
+ if wn.startswith("weight"):
530
+ params = _get_weight_and_quantization_params(self, wn)
531
+ weight = _quantize_and_dequantize_weight(*params)
532
+ else:
533
+ weight = None
534
+ flat_weights.append(weight)
535
+ return flat_weights
536
+
537
+ def forward(self, input, hx=None): # noqa: F811
538
+ # Note: this is copied from the forward of GRU in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
539
+ # only changed self._flat_weights to self.get_flat_weights()
540
+ # TODO: maybe we can try inheriting from that class and define get_flat_weights
541
+ # as a @property? this might interfere with TorchScript, if we remove that
542
+ # requirement in the future we should be able to do this
543
+ orig_input = input
544
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
545
+ if isinstance(orig_input, PackedSequence):
546
+ input, batch_sizes, sorted_indices, unsorted_indices = input
547
+ max_batch_size = int(batch_sizes[0])
548
+ else:
549
+ batch_sizes = None
550
+ assert (input.dim() in (2, 3)), f"GRU: Expected input to be 2-D or 3-D but received {input.dim()}-D tensor"
551
+ is_batched = input.dim() == 3
552
+ batch_dim = 0 if self.batch_first else 1
553
+ if not is_batched:
554
+ input = input.unsqueeze(batch_dim)
555
+ if hx is not None:
556
+ if hx.dim() != 2:
557
+ raise RuntimeError(
558
+ f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor")
559
+ hx = hx.unsqueeze(1)
560
+ else:
561
+ if hx is not None and hx.dim() != 3:
562
+ raise RuntimeError(
563
+ f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor")
564
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
565
+ sorted_indices = None
566
+ unsorted_indices = None
567
+
568
+ if hx is None:
569
+ num_directions = 2 if self.bidirectional else 1
570
+ hx = torch.zeros(self.num_layers * num_directions,
571
+ max_batch_size, self.hidden_size,
572
+ dtype=input.dtype, device=input.device)
573
+ else:
574
+ # Each batch of the hidden state should match the input sequence that
575
+ # the user believes he/she is passing in.
576
+ hx = self.permute_hidden(hx, sorted_indices)
577
+
578
+ self.check_forward_args(input, hx, batch_sizes)
579
+ if batch_sizes is None:
580
+ result = _VF.gru(input, hx, self.get_flat_weights(), self.bias, self.num_layers,
581
+ self.dropout, self.training, self.bidirectional, self.batch_first)
582
+ else:
583
+ result = _VF.gru(input, batch_sizes, hx, self.get_flat_weights(), self.bias,
584
+ self.num_layers, self.dropout, self.training, self.bidirectional)
585
+ output = result[0]
586
+ hidden = result[1]
587
+
588
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
589
+ if isinstance(orig_input, PackedSequence):
590
+ output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
591
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
592
+ else:
593
+ if not is_batched: # type: ignore[possibly-undefined]
594
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
595
+ hidden = hidden.squeeze(1)
596
+
597
+ return output, self.permute_hidden(hidden, unsorted_indices)
598
+
599
+ def _get_name(self):
600
+ return "QuantizedGRU(Reference)"
601
+
602
+ @classmethod
603
+ def from_float(cls, mod, weight_qparams_dict):
604
+ ref_mod = cls(
605
+ mod.input_size,
606
+ mod.hidden_size,
607
+ mod.num_layers,
608
+ mod.bias,
609
+ mod.batch_first,
610
+ mod.dropout,
611
+ mod.bidirectional,
612
+ weight_qparams_dict=weight_qparams_dict)
613
+ for wn in mod._flat_weights_names:
614
+ setattr(ref_mod, wn, getattr(mod, wn))
615
+ return ref_mod
parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/sparse.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch import Tensor
5
+ from .utils import ReferenceQuantizedModule
6
+ from typing import Optional, Dict, Any
7
+
8
+ __all__ = ['Embedding', 'EmbeddingBag']
9
+
10
+ class Embedding(nn.Embedding, ReferenceQuantizedModule):
11
+ """ A reference quantized Embedding module that fits into the
12
+ FX Graph Mode Quantization workflow, activation will be floating point Tensor,
13
+ we will store floating point weight as well in the module, but in forward we'll
14
+ quantize and dequantize the weight before running the floating point functional
15
+ embedding operator.
16
+ """
17
+ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
18
+ max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
19
+ sparse: bool = False, _weight: Optional[Tensor] = None,
20
+ device=None, dtype=None,
21
+ weight_qparams: Optional[Dict[str, Any]] = None) -> None:
22
+ super().__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
23
+ norm_type, scale_grad_by_freq, sparse, _weight, device, dtype)
24
+ self._init_weight_qparams(weight_qparams, device)
25
+
26
+ def _get_name(self):
27
+ return "QuantizedEmbedding(Reference)"
28
+
29
+ def forward(self, input: Tensor) -> Tensor:
30
+ weight_quant_dequant = self.get_weight()
31
+ return F.embedding(
32
+ input, weight_quant_dequant, self.padding_idx, self.max_norm,
33
+ self.norm_type, self.scale_grad_by_freq, self.sparse)
34
+
35
+ @classmethod
36
+ def from_float(cls, mod, weight_qparams):
37
+ return cls(
38
+ mod.num_embeddings,
39
+ mod.embedding_dim,
40
+ mod.padding_idx,
41
+ mod.max_norm,
42
+ mod.norm_type,
43
+ mod.scale_grad_by_freq,
44
+ mod.sparse,
45
+ mod.weight,
46
+ mod.weight.device,
47
+ mod.weight.dtype,
48
+ weight_qparams)
49
+
50
+ class EmbeddingBag(nn.EmbeddingBag, ReferenceQuantizedModule):
51
+ """ A reference quantized EmbeddingBag module that fits into the
52
+ FX Graph Mode Quantization workflow, activation will be floating point Tensor,
53
+ we will store floating point weight as well in the module, but in forward we'll
54
+ quantize and dequantize the weight before running the floating point functional
55
+ embedding operator.
56
+ """
57
+ def __init__(self, num_embeddings: int, embedding_dim: int,
58
+ max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
59
+ mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
60
+ include_last_offset: bool = False, padding_idx: Optional[int] = None,
61
+ device=None, dtype=None,
62
+ weight_qparams: Optional[Dict[str, Any]] = None) -> None:
63
+ super().__init__(num_embeddings, embedding_dim, max_norm, norm_type,
64
+ scale_grad_by_freq, mode, sparse, _weight, include_last_offset,
65
+ padding_idx, device, dtype)
66
+ self._init_weight_qparams(weight_qparams, device)
67
+
68
+ def _get_name(self):
69
+ return "QuantizedEmbedding(Reference)"
70
+
71
+ def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
72
+ weight_quant_dequant = self.get_weight()
73
+ return F.embedding_bag(input, weight_quant_dequant, offsets,
74
+ self.max_norm, self.norm_type,
75
+ self.scale_grad_by_freq, self.mode, self.sparse,
76
+ per_sample_weights, self.include_last_offset,
77
+ self.padding_idx)
78
+
79
+ @classmethod
80
+ def from_float(cls, mod, weight_qparams, use_precomputed_fake_quant=False):
81
+ return cls(
82
+ mod.num_embeddings,
83
+ mod.embedding_dim,
84
+ mod.max_norm,
85
+ mod.norm_type,
86
+ mod.scale_grad_by_freq,
87
+ mod.mode,
88
+ mod.sparse,
89
+ mod.weight,
90
+ mod.include_last_offset,
91
+ mod.padding_idx,
92
+ mod.weight.device,
93
+ mod.weight.dtype,
94
+ weight_qparams
95
+ )
parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from . import quantized
parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.ao.nn.sparse.quantized import dynamic
2
+
3
+ from .linear import Linear
4
+ from .linear import LinearPackedParams
5
+
6
+ __all__ = [
7
+ "dynamic",
8
+ "Linear",
9
+ "LinearPackedParams",
10
+ ]
parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (359 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc ADDED
Binary file (7.52 kB). View file