diff --git a/.gitattributes b/.gitattributes index 5ab3d049bf882ec5334e7176f980c36f3d72346b..841dca48e128949e243e3ec82d2b8550d5980741 100644 --- a/.gitattributes +++ b/.gitattributes @@ -877,3 +877,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/distribute/__pycache__ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_math_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_array_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/m/mach-gnu-color b/llava_next/share/terminfo/m/mach-gnu-color new file mode 100644 index 0000000000000000000000000000000000000000..b4a03898d01de3f3e4ce7823fe7abae8d041e4f5 Binary files /dev/null and b/llava_next/share/terminfo/m/mach-gnu-color differ diff --git a/llava_next/share/terminfo/m/mgr-sun b/llava_next/share/terminfo/m/mgr-sun new file mode 100644 index 0000000000000000000000000000000000000000..370a79e75071b3e49b94e2874b0a77331af50bcf Binary files /dev/null and b/llava_next/share/terminfo/m/mgr-sun differ diff --git a/llava_next/share/terminfo/m/microterm b/llava_next/share/terminfo/m/microterm new file mode 100644 index 0000000000000000000000000000000000000000..b24bd133a271eed05f4a8c8ceab2a9c462bdc8f3 Binary files /dev/null and b/llava_next/share/terminfo/m/microterm differ diff --git a/llava_next/share/terminfo/m/minix b/llava_next/share/terminfo/m/minix new file mode 100644 index 0000000000000000000000000000000000000000..ce75ec397c6dc44e1190188efa74782f2f5bcb05 Binary files /dev/null and b/llava_next/share/terminfo/m/minix differ diff --git a/llava_next/share/terminfo/m/minix-1.5 b/llava_next/share/terminfo/m/minix-1.5 new file mode 100644 index 0000000000000000000000000000000000000000..8c8d4db5c81339b3edeeca5b9e3658b531ca1c76 Binary files /dev/null and b/llava_next/share/terminfo/m/minix-1.5 differ diff --git a/llava_next/share/terminfo/m/mintty-direct b/llava_next/share/terminfo/m/mintty-direct new file mode 100644 index 0000000000000000000000000000000000000000..5fcfbd20f86440942ef1a1712c602b3510537d1b Binary files /dev/null and b/llava_next/share/terminfo/m/mintty-direct differ diff --git a/llava_next/share/terminfo/m/mlterm-256color b/llava_next/share/terminfo/m/mlterm-256color new file mode 100644 index 0000000000000000000000000000000000000000..43066fd8964d6c004e835c784b5ba61c9bef5ead Binary files /dev/null and b/llava_next/share/terminfo/m/mlterm-256color differ diff --git a/llava_next/share/terminfo/m/mosh b/llava_next/share/terminfo/m/mosh new file mode 100644 index 0000000000000000000000000000000000000000..070ae4a06d51d78db171049a73ef6dec2cff4101 Binary files /dev/null and b/llava_next/share/terminfo/m/mosh differ diff --git a/llava_next/share/terminfo/m/ms-vt-utf8 
b/llava_next/share/terminfo/m/ms-vt-utf8 new file mode 100644 index 0000000000000000000000000000000000000000..61a77521818a182ef667498bb7299fdb77684e45 Binary files /dev/null and b/llava_next/share/terminfo/m/ms-vt-utf8 differ diff --git a/llava_next/share/terminfo/m/mskermit22714 b/llava_next/share/terminfo/m/mskermit22714 new file mode 100644 index 0000000000000000000000000000000000000000..487bb61b24f206015877fcb0711289e09d56635a Binary files /dev/null and b/llava_next/share/terminfo/m/mskermit22714 differ diff --git a/llava_next/share/terminfo/m/mterm b/llava_next/share/terminfo/m/mterm new file mode 100644 index 0000000000000000000000000000000000000000..016fced44fa463052149fc92a9ca4f7e8acd3aaa Binary files /dev/null and b/llava_next/share/terminfo/m/mterm differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..376f359bcb6b117ed9dc4bf37e2692e756a764e3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..b8bff1f5e3a9936ab823a0925eb046b028ff3ab5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/dynamic/modules/linear_relu.py @@ -0,0 +1,56 @@ +# mypy: allow-untyped-defs +import torch +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.intrinsic as nni + +__all__ = [ + "LinearReLU" +] + +class LinearReLU(nnqd.Linear): + r""" + A LinearReLU module fused from Linear and ReLU modules that can be used + for dynamic quantization. + Supports both FP16 and INT8 quantization. + + We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.
+ + Attributes: + Same as torch.ao.nn.quantized.dynamic.Linear + + Examples:: + + >>> # xdoctest: +SKIP + >>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + _FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment] + + def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8): + super().__init__(in_features, out_features, bias, dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self._packed_params.dtype == torch.qint8: + # TODO check if we should set reduce_range = True by default here + Y = torch.ops.quantized.linear_relu_dynamic( + x, self._packed_params._packed_params, reduce_range=True) + elif self._packed_params.dtype == torch.float16: + Y = torch.ops.quantized.linear_relu_dynamic_fp16( + x, self._packed_params._packed_params) + else: + raise RuntimeError('Unsupported dtype on dynamic quantized linear relu!') + return Y.to(x.dtype) + + def _get_name(self): + return 'DynamicQuantizedLinearReLU' + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + @classmethod + def from_reference(cls, ref_qlinear_relu): + return super().from_reference(ref_qlinear_relu[0]) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51149bff646cf2f729a958b608be4ca3c0639115 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__init__.py @@ -0,0 +1,17 @@ +from .linear_relu import LinearReLU, LinearLeakyReLU, LinearTanh +from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d +from .bn_relu import BNReLU2d, BNReLU3d +from .conv_add import ConvAdd2d, ConvAddReLU2d + +__all__ = [ + 'LinearReLU', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'BNReLU2d', + 'BNReLU3d', + 'LinearLeakyReLU', + 'LinearTanh', + 'ConvAdd2d', + 'ConvAddReLU2d', +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceb415974578d42a31d8b6c4c3863fc9633f49bc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py new file mode 100644 index 0000000000000000000000000000000000000000..e7df10597331c024908622ec004d041d81abe992 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/intrinsic/quantized/modules/conv_add.py @@ -0,0 +1,94 @@ +# mypy: allow-untyped-defs +import torch +import torch.ao.nn.intrinsic +import torch.ao.nn.intrinsic.qat +import torch.nn.functional as F +import torch.ao.nn.quantized as nnq + +_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding + +class ConvAdd2d(nnq.Conv2d): + r""" + A ConvAdd2d module is a fused module of Conv2d and Add + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
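+ A usage sketch (illustrative only; the module path is real, but the shapes and quantization parameters below are assumed rather than taken from the original docstring):: + + >>> # xdoctest: +SKIP + >>> m = torch.ao.nn.intrinsic.quantized.ConvAdd2d(3, 16, 3) + >>> x = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8), 0.1, 0, torch.quint8) + >>> extra = torch.quantize_per_tensor(torch.randn(1, 16, 6, 6), 0.1, 0, torch.quint8) + >>> out = m(x, extra) # conv(x) + extra, computed in the quantized domain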
+ + Attributes: + Same as torch.ao.nn.quantized.Conv2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAdd2d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input, extra_input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv2d_add( + input, extra_input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvAdd2d' + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) + +class ConvAddReLU2d(nnq.Conv2d): + r""" + A ConvAddReLU2d module is a fused module of Conv2d, Add and Relu + + We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`. + + Attributes: + Same as torch.ao.nn.quantized.Conv2d + + """ + _FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAddReLU2d # type: ignore[assignment] + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', device=None, dtype=None): + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias, + padding_mode=padding_mode, device=device, dtype=dtype) + + def forward(self, input, extra_input): + # Temporarily using len(shape) instead of ndim due to JIT issue + # https://github.com/pytorch/pytorch/issues/23890 + if len(input.shape) != 4: + raise ValueError("Input shape must be `(N, C, H, W)`!") + if self.padding_mode != 'zeros': + _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding) + input = F.pad(input, _reversed_padding_repeated_twice, + mode=self.padding_mode) + return torch.ops.quantized.conv2d_add_relu( + input, extra_input, self._packed_params, self.scale, self.zero_point) + + def _get_name(self): + return 'QuantizedConvAddReLU2d' + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + @classmethod + def from_reference(cls, ref_qconv, output_scale, output_zero_point): + return super().from_reference(ref_qconv[0], output_scale, output_zero_point) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git 
a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0387d6a234c8a1cec0b4fc9ea5d3932e9f713e33 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7c9fb032a2bb3d5e4452b48d0a870615c186f365 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__init__.py @@ -0,0 +1,9 @@ +from .activation import MultiheadAttention +from .rnn import LSTM +from .rnn import LSTMCell + +__all__ = [ + 'LSTM', + 'LSTMCell', + 'MultiheadAttention', +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07739303784055fb709f7dfa7d6cf7bad6c66567 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe611b0512586283784d17089baf94811ef801b2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..060119ea6a47972322dd4280062b0d3471e83d7f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..8a45499fd80ffce1d56cb0e9d4b7b1f45d4ba4f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/activation.py @@ -0,0 +1,473 @@ +# mypy: allow-untyped-defs +import torch +import torch.jit # this is needed to avoid a circular import +from torch import nn +import torch.nn.functional as nnF + +from torch import Tensor +from typing import Optional, Tuple + +import warnings + +__all__ = [ + "MultiheadAttention" +] + +class MultiheadAttention(nn.MultiheadAttention): + _FLOAT_MODULE = nn.MultiheadAttention + + r"""Quantizable implementation of the MultiheadAttention. + + Note:: + Please, refer to :class:`~torch.nn.MultiheadAttention` for more + information + + Allows the model to jointly attend to information from different + representation subspaces. + See reference: Attention Is All You Need + + The original MHA module is not quantizable. 
+ This reimplements it by explicitly instantiating the linear layers. + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` (seq, batch, feature). + + Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set + to :attr:`embed_dim` such that query, key, and value have the same + number of features. + + Examples:: + + >>> import torch.ao.nn.quantizable as nnqa + >>> multihead_attn = nnqa.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + + Note:: + Please, follow the quantization flow to convert the quantizable MHA. + """ + __constants__ = ['batch_first'] + + def __init__(self, embed_dim: int, num_heads: int, + dropout: float = 0., bias: bool = True, + add_bias_kv: bool = False, add_zero_attn: bool = False, + kdim: Optional[int] = None, vdim: Optional[int] = None, batch_first: bool = False, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__(embed_dim, num_heads, dropout, + bias, add_bias_kv, + add_zero_attn, kdim, vdim, batch_first, + **factory_kwargs) + self.linear_Q = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) + self.linear_K = nn.Linear(self.kdim, self.embed_dim, bias=bias, **factory_kwargs) + self.linear_V = nn.Linear(self.vdim, self.embed_dim, bias=bias, **factory_kwargs) + # for the type: ignore, see https://github.com/pytorch/pytorch/issues/58969 + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=bias, **factory_kwargs) # type: ignore[assignment] + + # Functionals + self.q_scaling_product = torch.ao.nn.quantized.FloatFunctional() + # note: importing torch.ao.nn.quantized at top creates a circular import + + # Quant/Dequant + self.quant_attn_output = torch.ao.quantization.QuantStub() + self.quant_attn_output_weights = torch.ao.quantization.QuantStub() + self.dequant_q = torch.ao.quantization.DeQuantStub() + self.dequant_k = torch.ao.quantization.DeQuantStub() + self.dequant_v = torch.ao.quantization.DeQuantStub() + + def _get_name(self): + return 'QuantizableMultiheadAttention' + + @classmethod + def from_float(cls, other): + assert type(other) == cls._FLOAT_MODULE + assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'" + # Setting the dropout to 0.0! 
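+ # The observed copy is put into eval() below (before the explicit prepare), + # so the dropout value forwarded here is inactive while the observers + # collect calibration statistics.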
+ observed = cls(other.embed_dim, other.num_heads, other.dropout, + (other.in_proj_bias is not None), + (other.bias_k is not None), + other.add_zero_attn, other.kdim, other.vdim, + other.batch_first) + observed.bias_k = other.bias_k + observed.bias_v = other.bias_v + observed.qconfig = other.qconfig + + # Set the linear weights + # for the type: ignores, see https://github.com/pytorch/pytorch/issues/58969 + observed.out_proj.weight = other.out_proj.weight # type: ignore[has-type] + observed.out_proj.bias = other.out_proj.bias # type: ignore[has-type] + if other._qkv_same_embed_dim: + # Use separate params + bias = other.in_proj_bias + _start = 0 + _end = _start + other.embed_dim + weight = other.in_proj_weight[_start:_end, :] + if bias is not None: + bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad) + observed.linear_Q.weight = torch.nn.Parameter(weight, + weight.requires_grad) + observed.linear_Q.bias = bias + + bias = other.in_proj_bias + _start = _end + _end = _start + other.embed_dim + weight = other.in_proj_weight[_start:_end, :] + if bias is not None: + bias = torch.nn.Parameter(bias[_start:_end], bias.requires_grad) + observed.linear_K.weight = torch.nn.Parameter(weight, + weight.requires_grad) + observed.linear_K.bias = bias + + bias = other.in_proj_bias + _start = _end + weight = other.in_proj_weight[_start:, :] + if bias is not None: + bias = torch.nn.Parameter(bias[_start:], bias.requires_grad) + observed.linear_V.weight = torch.nn.Parameter(weight, + weight.requires_grad) + observed.linear_V.bias = bias + else: + observed.linear_Q.weight = nn.Parameter(other.q_proj_weight) + observed.linear_K.weight = nn.Parameter(other.k_proj_weight) + observed.linear_V.weight = nn.Parameter(other.v_proj_weight) + if other.in_proj_bias is None: + observed.linear_Q.bias = None # type: ignore[assignment] + observed.linear_K.bias = None # type: ignore[assignment] + observed.linear_V.bias = None # type: ignore[assignment] + else: + observed.linear_Q.bias = nn.Parameter(other.in_proj_bias[0:other.embed_dim]) + observed.linear_K.bias = nn.Parameter(other.in_proj_bias[other.embed_dim:(other.embed_dim * 2)]) + observed.linear_V.bias = nn.Parameter(other.in_proj_bias[(other.embed_dim * 2):]) + observed.eval() + # Explicit prepare + observed = torch.ao.quantization.prepare(observed, inplace=True) + return observed + + @torch.jit.unused + def dequantize(self): + r"""Utility to convert the quantized MHA back to float. + + The motivation for this is that it is not trivial to convert the weights + from the format that is used in the quantized version back to the + float format. + """ + fp = self._FLOAT_MODULE(self.embed_dim, self.num_heads, self.dropout, + (self.linear_Q._weight_bias()[1] is not None), + (self.bias_k is not None), + self.add_zero_attn, self.kdim, self.vdim, self.batch_first) + assert fp._qkv_same_embed_dim == self._qkv_same_embed_dim + if self.bias_k is not None: + fp.bias_k = nn.Parameter(self.bias_k.dequantize()) + if self.bias_v is not None: + fp.bias_v = nn.Parameter(self.bias_v.dequantize()) + + # Set the linear weights + # Note: Because the linear layers are quantized, mypy does not know how + to deal with them -- might need to ignore the typing checks.
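+ # `_weight_bias()` on a quantized Linear unpacks the packed parameters and + # returns the (quantized weight, bias) pair; the weight still needs an + # explicit dequantize() before it can be copied into the float module.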
+ # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969 + w, b = self.out_proj._weight_bias() # type: ignore[operator, has-type] + fp.out_proj.weight = nn.Parameter(w.dequantize()) + if b is not None: + fp.out_proj.bias = nn.Parameter(b) + + wQ, bQ = self.linear_Q._weight_bias() # type: ignore[operator] + wQ = wQ.dequantize() + wK, bK = self.linear_K._weight_bias() # type: ignore[operator] + wK = wK.dequantize() + wV, bV = self.linear_V._weight_bias() # type: ignore[operator] + wV = wV.dequantize() + if fp._qkv_same_embed_dim: + # Use separate params + _start = 0 + _end = _start + fp.embed_dim + fp.in_proj_weight[_start:_end, :] = wQ + if fp.in_proj_bias is not None: + assert all(bQ == 0) + fp.in_proj_bias[_start:_end] = bQ + + _start = _end + _end = _start + fp.embed_dim + fp.in_proj_weight[_start:_end, :] = wK + if fp.in_proj_bias is not None: + assert all(bK == 0) + fp.in_proj_bias[_start:_end] = bK + + _start = _end + fp.in_proj_weight[_start:, :] = wV + if fp.in_proj_bias is not None: + assert all(bV == 0) + fp.in_proj_bias[_start:] = bV + else: + fp.q_proj_weight = nn.Parameter(wQ) + fp.k_proj_weight = nn.Parameter(wK) + fp.v_proj_weight = nn.Parameter(wV) + if fp.in_proj_bias is None: + self.linear_Q.bias = None + self.linear_K.bias = None + self.linear_V.bias = None + else: + fp.in_proj_bias[0:fp.embed_dim] = bQ + fp.in_proj_bias[fp.embed_dim:(fp.embed_dim * 2)] = bK + fp.in_proj_bias[(fp.embed_dim * 2):] = bV + + return fp + + @classmethod + def from_observed(cls, other): + # The whole flow is float -> observed -> quantized + # This class does float -> observed only + # See nn.quantized.MultiheadAttention + raise NotImplementedError("It looks like you are trying to prepare an " + "MHA module. Please, see " + "the examples on quantizable MHAs.") + + def forward(self, + query: Tensor, + key: Tensor, + value: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Note:: + Please, refer to :func:`~torch.nn.MultiheadAttention.forward` for more + information + + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. :math:`(N, S, E)` if ``batch_first`` is ``True``. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. 
If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - is_causal: If specified, applies a causal mask as attention mask. Mutually exclusive with providing attn_mask. + Default: ``False``. + - average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across + heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an + effect when ``need_weights=True``. Default: True (i.e. average weights across heads) + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. :math:`(N, L, E)` if ``batch_first`` is ``True``. + - attn_output_weights: If ``average_attn_weights=True``, returns attention weights averaged + across heads of shape :math:`(N, L, S)`, where N is the batch size, L is the target sequence length, + S is the source sequence length. If ``average_attn_weights=False``, returns attention weights per + head of shape :math:`(N, num_heads, L, S)`. + """ + return self._forward_impl(query, key, value, key_padding_mask, + need_weights, attn_mask, average_attn_weights, + is_causal) + + def _forward_impl(self, + query: Tensor, + key: Tensor, + value: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False) -> Tuple[Tensor, Optional[Tensor]]: + # This version will not deal with the static key/value pairs. + # Keeping it here for future changes. + # + # TODO: This method has some duplicate lines with the + # `torch.nn.functional.multi_head_attention`. Will need to refactor. + static_k = None + static_v = None + + if attn_mask is not None and is_causal: + raise AssertionError("Only allow causal mask or attn_mask") + + if is_causal: + raise AssertionError("causal mask not supported by AO MHA module") + + if self.batch_first: + query, key, value = (x.transpose(0, 1) for x in (query, key, value)) + + tgt_len, bsz, embed_dim_to_check = query.size() + assert self.embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = self.embed_dim // self.num_heads + assert head_dim * self.num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + q = self.linear_Q(query) + k = self.linear_K(key) + v = self.linear_V(value) + + q = self.q_scaling_product.mul_scalar(q, scaling) + + if attn_mask is not None: + if attn_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for `attn_mask` in `nn.MultiheadAttention` is deprecated. 
" + "Use bool tensor instead.", + stacklevel=3, + ) + attn_mask = attn_mask.to(torch.bool) + assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \ + f'Only float and bool types are supported for attn_mask, not {attn_mask.dtype}' + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * self.num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported") + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for `key_padding_mask` in `nn.MultiheadAttention` is deprecated. " + "Use bool tensor instead.", + stacklevel=3, + ) + key_padding_mask = key_padding_mask.to(torch.bool) + if self.bias_k is not None and self.bias_v is not None: + if static_k is None and static_v is None: + + # Explicitly assert that bias_k and bias_v are not None + # in a way that TorchScript can understand. + bias_k = self.bias_k + assert bias_k is not None + bias_v = self.bias_v + assert bias_v is not None + + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = nnF.pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = nnF.pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert self.bias_k is None + assert self.bias_v is None + + q = q.contiguous().view(tgt_len, bsz * self.num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * self.num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * self.num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * self.num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if self.add_zero_attn: + src_len += 1 + k_zeros = torch.zeros((k.size(0), 1) + k.size()[2:]) + if k.is_quantized: + k_zeros = torch.quantize_per_tensor(k_zeros, k.q_scale(), k.q_zero_point(), k.dtype) + k = torch.cat([k, k_zeros], dim=1) + v_zeros = torch.zeros((v.size(0), 1) + k.size()[2:]) + if v.is_quantized: + v_zeros = torch.quantize_per_tensor(v_zeros, v.q_scale(), v.q_zero_point(), v.dtype) + v = torch.cat([v, v_zeros], dim=1) + + if attn_mask is not None: + attn_mask = nnF.pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = nnF.pad(key_padding_mask, (0, 1)) + + # Leaving the quantized zone here + q = self.dequant_q(q) + k = self.dequant_k(k) + v = self.dequant_v(v) + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_output_weights = nnF.softmax( + attn_output_weights, dim=-1) + attn_output_weights = nnF.dropout(attn_output_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * self.num_heads, tgt_len, head_dim] + if self.batch_first: + attn_output = attn_output.view(bsz, tgt_len, self.embed_dim) + else: + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim) + + # Reentering the quantized zone + attn_output = self.quant_attn_output(attn_output) + # for the type: ignore[has-type], see https://github.com/pytorch/pytorch/issues/58969 + attn_output = self.out_proj(attn_output) # type: ignore[has-type] + attn_output_weights = self.quant_attn_output_weights(attn_output_weights) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, self.num_heads, tgt_len, src_len) + if average_attn_weights: + attn_output_weights = attn_output_weights.mean(dim=1) + return attn_output, attn_output_weights + else: + return attn_output, None diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..a311587bd984261424926f8a2f48f28fd9e75b13 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantizable/modules/rnn.py @@ -0,0 +1,412 @@ +# mypy: allow-untyped-defs +import numbers +from typing import Optional, Tuple +import warnings + +import torch +from torch import Tensor + +""" +We will recreate all the RNN modules as we require the modules to be decomposed +into their building blocks to be able to observe them. +""" + +__all__ = [ + "LSTMCell", + "LSTM" +] + +class LSTMCell(torch.nn.Module): + r"""A quantizable long short-term memory (LSTM) cell. + + For the description and the argument types, please, refer to :class:`~torch.nn.LSTMCell` + + Examples:: + + >>> import torch.ao.nn.quantizable as nnqa + >>> rnn = nnqa.LSTMCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> cx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx, cx = rnn(input[i], (hx, cx)) + ... output.append(hx) + """ + _FLOAT_MODULE = torch.nn.LSTMCell + + def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.input_size = input_dim + self.hidden_size = hidden_dim + self.bias = bias + + self.igates = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=bias, **factory_kwargs) + self.hgates = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=bias, **factory_kwargs) + self.gates = torch.ao.nn.quantized.FloatFunctional() + + self.input_gate = torch.nn.Sigmoid() + self.forget_gate = torch.nn.Sigmoid() + self.cell_gate = torch.nn.Tanh() + self.output_gate = torch.nn.Sigmoid() + + self.fgate_cx = torch.ao.nn.quantized.FloatFunctional() + self.igate_cgate = torch.ao.nn.quantized.FloatFunctional() + self.fgate_cx_igate_cgate = torch.ao.nn.quantized.FloatFunctional() + + self.ogate_cy = torch.ao.nn.quantized.FloatFunctional() + + self.initial_hidden_state_qparams: Tuple[float, int] = (1.0, 0) + self.initial_cell_state_qparams: Tuple[float, int] = (1.0, 0) + self.hidden_state_dtype: torch.dtype = torch.quint8 + self.cell_state_dtype: torch.dtype = torch.quint8 + + def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: + if hidden is None or hidden[0] is None or hidden[1] is None: + hidden = self.initialize_hidden(x.shape[0], x.is_quantized) + hx, cx = hidden + + igates = self.igates(x) + hgates = self.hgates(hx) + gates = self.gates.add(igates, hgates) + + input_gate, forget_gate, cell_gate, out_gate = gates.chunk(4, 1) + + input_gate = self.input_gate(input_gate) + forget_gate = self.forget_gate(forget_gate) + cell_gate = self.cell_gate(cell_gate) + out_gate = self.output_gate(out_gate) + + fgate_cx = self.fgate_cx.mul(forget_gate, cx) + igate_cgate = self.igate_cgate.mul(input_gate, cell_gate) + fgate_cx_igate_cgate = self.fgate_cx_igate_cgate.add(fgate_cx, igate_cgate) + cy = fgate_cx_igate_cgate + + # TODO: make this tanh a member of the module so its qparams can be configured + tanh_cy = torch.tanh(cy) + hy = self.ogate_cy.mul(out_gate, tanh_cy) + return hy, cy + + def initialize_hidden(self, batch_size: int, is_quantized: bool = False) -> Tuple[Tensor, Tensor]: + h, c = torch.zeros((batch_size, self.hidden_size)), torch.zeros((batch_size, self.hidden_size)) + if is_quantized: + (h_scale, h_zp) = self.initial_hidden_state_qparams + (c_scale, c_zp) = self.initial_cell_state_qparams + h = torch.quantize_per_tensor(h, scale=h_scale, zero_point=h_zp, dtype=self.hidden_state_dtype) + c = torch.quantize_per_tensor(c, scale=c_scale, zero_point=c_zp,
dtype=self.cell_state_dtype) + return h, c + + def _get_name(self): + return 'QuantizableLSTMCell' + + @classmethod + def from_params(cls, wi, wh, bi=None, bh=None): + """Uses the weights and biases to create a new LSTM cell. + + Args: + wi, wh: Weights for the input and hidden layers + bi, bh: Biases for the input and hidden layers + """ + assert (bi is None) == (bh is None) # Either both None or both have values + input_size = wi.shape[1] + hidden_size = wh.shape[1] + cell = cls(input_dim=input_size, hidden_dim=hidden_size, + bias=(bi is not None)) + cell.igates.weight = torch.nn.Parameter(wi) + if bi is not None: + cell.igates.bias = torch.nn.Parameter(bi) + cell.hgates.weight = torch.nn.Parameter(wh) + if bh is not None: + cell.hgates.bias = torch.nn.Parameter(bh) + return cell + + @classmethod + def from_float(cls, other, use_precomputed_fake_quant=False): + assert type(other) == cls._FLOAT_MODULE + assert hasattr(other, 'qconfig'), "The float module must have 'qconfig'" + observed = cls.from_params(other.weight_ih, other.weight_hh, + other.bias_ih, other.bias_hh) + observed.qconfig = other.qconfig + observed.igates.qconfig = other.qconfig + observed.hgates.qconfig = other.qconfig + return observed + + +class _LSTMSingleLayer(torch.nn.Module): + r"""A single one-directional LSTM layer. + + The difference between a layer and a cell is that the layer can process a + sequence, while the cell only expects an instantaneous value. + """ + def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.cell = LSTMCell(input_dim, hidden_dim, bias=bias, **factory_kwargs) + + def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None): + result = [] + seq_len = x.shape[0] + for i in range(seq_len): + hidden = self.cell(x[i], hidden) + result.append(hidden[0]) # type: ignore[index] + result_tensor = torch.stack(result, 0) + return result_tensor, hidden + + @classmethod + def from_params(cls, *args, **kwargs): + cell = LSTMCell.from_params(*args, **kwargs) + layer = cls(cell.input_size, cell.hidden_size, cell.bias) + layer.cell = cell + return layer + + +class _LSTMLayer(torch.nn.Module): + r"""A single bi-directional LSTM layer.""" + def __init__(self, input_dim: int, hidden_dim: int, bias: bool = True, + batch_first: bool = False, bidirectional: bool = False, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.batch_first = batch_first + self.bidirectional = bidirectional + self.layer_fw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs) + if self.bidirectional: + self.layer_bw = _LSTMSingleLayer(input_dim, hidden_dim, bias=bias, **factory_kwargs) + + def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None): + if self.batch_first: + x = x.transpose(0, 1) + if hidden is None: + hx_fw, cx_fw = (None, None) + else: + hx_fw, cx_fw = hidden + hidden_bw: Optional[Tuple[Tensor, Tensor]] = None + if self.bidirectional: + if hx_fw is None: + hx_bw = None + else: + hx_bw = hx_fw[1] + hx_fw = hx_fw[0] + if cx_fw is None: + cx_bw = None + else: + cx_bw = cx_fw[1] + cx_fw = cx_fw[0] + if hx_bw is not None and cx_bw is not None: + hidden_bw = hx_bw, cx_bw + if hx_fw is None and cx_fw is None: + hidden_fw = None + else: + hidden_fw = torch.jit._unwrap_optional(hx_fw), torch.jit._unwrap_optional(cx_fw) + result_fw, hidden_fw = self.layer_fw(x, hidden_fw) + + if 
hasattr(self, 'layer_bw') and self.bidirectional: + x_reversed = x.flip(0) + result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw) + result_bw = result_bw.flip(0) + + result = torch.cat([result_fw, result_bw], result_fw.dim() - 1) + if hidden_fw is None and hidden_bw is None: + h = None + c = None + elif hidden_fw is None: + (h, c) = torch.jit._unwrap_optional(hidden_bw) + elif hidden_bw is None: + (h, c) = torch.jit._unwrap_optional(hidden_fw) + else: + h = torch.stack([hidden_fw[0], hidden_bw[0]], 0) # type: ignore[list-item] + c = torch.stack([hidden_fw[1], hidden_bw[1]], 0) # type: ignore[list-item] + else: + result = result_fw + h, c = torch.jit._unwrap_optional(hidden_fw) # type: ignore[assignment] + + if self.batch_first: + result.transpose_(0, 1) + + return result, (h, c) + + @classmethod + def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs): + r""" + There is no FP equivalent of this class. This function is here just to + mimic the behavior of the `prepare` within the `torch.ao.quantization` + flow. + """ + assert hasattr(other, 'qconfig') or (qconfig is not None) + + input_size = kwargs.get('input_size', other.input_size) + hidden_size = kwargs.get('hidden_size', other.hidden_size) + bias = kwargs.get('bias', other.bias) + batch_first = kwargs.get('batch_first', other.batch_first) + bidirectional = kwargs.get('bidirectional', other.bidirectional) + + layer = cls(input_size, hidden_size, bias, batch_first, bidirectional) + layer.qconfig = getattr(other, 'qconfig', qconfig) + wi = getattr(other, f'weight_ih_l{layer_idx}') + wh = getattr(other, f'weight_hh_l{layer_idx}') + bi = getattr(other, f'bias_ih_l{layer_idx}', None) + bh = getattr(other, f'bias_hh_l{layer_idx}', None) + + layer.layer_fw = _LSTMSingleLayer.from_params(wi, wh, bi, bh) + + if other.bidirectional: + wi = getattr(other, f'weight_ih_l{layer_idx}_reverse') + wh = getattr(other, f'weight_hh_l{layer_idx}_reverse') + bi = getattr(other, f'bias_ih_l{layer_idx}_reverse', None) + bh = getattr(other, f'bias_hh_l{layer_idx}_reverse', None) + layer.layer_bw = _LSTMSingleLayer.from_params(wi, wh, bi, bh) + return layer + + +class LSTM(torch.nn.Module): + r"""A quantizable long short-term memory (LSTM). + + For the description and the argument types, please, refer to :class:`~torch.nn.LSTM` + + Attributes: + layers : instances of the `_LSTMLayer` + + .. note:: + To access the weights and biases, you need to access them per layer. + See examples below. + + Examples:: + + >>> import torch.ao.nn.quantizable as nnqa + >>> rnn = nnqa.LSTM(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> c0 = torch.randn(2, 3, 20) + >>> output, (hn, cn) = rnn(input, (h0, c0)) + >>> # To get the weights: + >>> # xdoctest: +SKIP + >>> print(rnn.layers[0].weight_ih) + tensor([[...]]) + >>> print(rnn.layers[0].weight_hh) + AssertionError: There is no reverse path in the non-bidirectional layer + """ + _FLOAT_MODULE = torch.nn.LSTM + + def __init__(self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, + batch_first: bool = False, dropout: float = 0., + bidirectional: bool = False, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.num_layers = num_layers + self.bias = bias + self.batch_first = batch_first + self.dropout = float(dropout) + self.bidirectional = bidirectional + self.training = False # Default to eval mode. 
If we want to train, we will explicitly set to training. + num_directions = 2 if bidirectional else 1 + + if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \ + isinstance(dropout, bool): + raise ValueError("dropout should be a number in range [0, 1] " + "representing the probability of an element being " + "zeroed") + if dropout > 0: + warnings.warn("dropout option for quantizable LSTM is ignored. " + "If you are training, please, use nn.LSTM version " + "followed by `prepare` step.") + if num_layers == 1: + warnings.warn("dropout option adds dropout after all but last " + "recurrent layer, so non-zero dropout expects " + f"num_layers greater than 1, but got dropout={dropout} " + f"and num_layers={num_layers}") + + layers = [_LSTMLayer(self.input_size, self.hidden_size, + self.bias, batch_first=False, + bidirectional=self.bidirectional, **factory_kwargs)] + for layer in range(1, num_layers): + layers.append(_LSTMLayer(self.hidden_size, self.hidden_size, + self.bias, batch_first=False, + bidirectional=self.bidirectional, + **factory_kwargs)) + self.layers = torch.nn.ModuleList(layers) + + def forward(self, x: Tensor, hidden: Optional[Tuple[Tensor, Tensor]] = None): + if self.batch_first: + x = x.transpose(0, 1) + + max_batch_size = x.size(1) + num_directions = 2 if self.bidirectional else 1 + if hidden is None: + zeros = torch.zeros(num_directions, max_batch_size, + self.hidden_size, dtype=torch.float, + device=x.device) + zeros.squeeze_(0) + if x.is_quantized: + zeros = torch.quantize_per_tensor(zeros, scale=1.0, + zero_point=0, dtype=x.dtype) + hxcx = [(zeros, zeros) for _ in range(self.num_layers)] + else: + hidden_non_opt = torch.jit._unwrap_optional(hidden) + if isinstance(hidden_non_opt[0], Tensor): + hx = hidden_non_opt[0].reshape(self.num_layers, num_directions, + max_batch_size, + self.hidden_size) + cx = hidden_non_opt[1].reshape(self.num_layers, num_directions, + max_batch_size, + self.hidden_size) + hxcx = [(hx[idx].squeeze(0), cx[idx].squeeze(0)) for idx in range(self.num_layers)] + else: + hxcx = hidden_non_opt + + hx_list = [] + cx_list = [] + for idx, layer in enumerate(self.layers): + x, (h, c) = layer(x, hxcx[idx]) + hx_list.append(torch.jit._unwrap_optional(h)) + cx_list.append(torch.jit._unwrap_optional(c)) + hx_tensor = torch.stack(hx_list) + cx_tensor = torch.stack(cx_list) + + # We are creating another dimension for bidirectional case + # need to collapse it + hx_tensor = hx_tensor.reshape(-1, hx_tensor.shape[-2], hx_tensor.shape[-1]) + cx_tensor = cx_tensor.reshape(-1, cx_tensor.shape[-2], cx_tensor.shape[-1]) + + if self.batch_first: + x = x.transpose(0, 1) + + return x, (hx_tensor, cx_tensor) + + def _get_name(self): + return 'QuantizableLSTM' + + @classmethod + def from_float(cls, other, qconfig=None): + assert isinstance(other, cls._FLOAT_MODULE) + assert (hasattr(other, 'qconfig') or qconfig) + observed = cls(other.input_size, other.hidden_size, other.num_layers, + other.bias, other.batch_first, other.dropout, + other.bidirectional) + observed.qconfig = getattr(other, 'qconfig', qconfig) + for idx in range(other.num_layers): + observed.layers[idx] = _LSTMLayer.from_float(other, idx, qconfig, + batch_first=False) + + # Prepare the model + if other.training: + observed.train() + observed = torch.ao.quantization.prepare_qat(observed, inplace=True) + else: + observed.eval() + observed = torch.ao.quantization.prepare(observed, inplace=True) + return observed + + @classmethod + def from_observed(cls, other): + # The whole flow is float -> observed 
-> quantized + # This class does float -> observed only + raise NotImplementedError("It looks like you are trying to convert a " + "non-quantizable LSTM module. Please, see " + "the examples on quantizable LSTMs.") diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c6201ea01b849238078867f04ced1b3378c84f9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..035b0d7d4fe29a4c1fd47c347dafb042c39883c2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7a97e0a8da831d63d25d6f02edf77cb85b280a7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__init__.py @@ -0,0 +1,19 @@ + +from .linear import Linear +from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell +from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d + +__all__ = [ + 'Linear', + 'LSTM', + 'GRU', + 'LSTMCell', + 'RNNCell', + 'GRUCell', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f4cb4b49efb8fc3ed94afae85531a15e3de5288 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c87ec972316576e2ef979071e75d10397e9b7082 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..c912360028f5c3335da1913e19023018685ad228 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..9afab93d1a55f6efde85fc2b9c1dfcb4a9ceab13 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/dynamic/modules/rnn.py @@ -0,0 +1,1101 @@ +# mypy: allow-untyped-defs +import numbers +import warnings +from typing_extensions import deprecated + +import torch +import torch.nn as nn +from torch import Tensor # noqa: F401 +from torch._jit_internal import Tuple, Optional, List, Union, Dict # noqa: F401 +from torch.nn.utils.rnn import PackedSequence +from torch.ao.nn.quantized.modules.utils import _quantize_weight + +__all__ = ['pack_weight_bias', 'PackedParameter', 'RNNBase', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell', + 'GRUCell', "apply_permutation"] + + +def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + return tensor.index_select(dim, permutation) + + +@deprecated( + "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead", + category=FutureWarning, +) +def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + return _apply_permutation(tensor, permutation, dim) + + +def pack_weight_bias(qweight, bias, dtype): + + if dtype == torch.qint8: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # w_ih, w_hh + packed_weight = \ + torch.ops.quantized.linear_prepack(qweight, bias) + + return packed_weight + else: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # packed_ih, packed_hh, b_ih, b_hh + packed_weight = torch.ops.quantized.linear_prepack_fp16( + qweight, bias) + + return packed_weight + + +class PackedParameter(torch.nn.Module): + def __init__(self, param): + super().__init__() + self.param = param + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'param'] = self.param + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self.param = state_dict[prefix + 'param'] + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + +class RNNBase(torch.nn.Module): + + _FLOAT_MODULE = nn.RNNBase + + _version = 2 + + def __init__(self, mode, input_size, hidden_size, + num_layers=1, bias=True, batch_first=False, + dropout=0., bidirectional=False, dtype=torch.qint8): + super().__init__() + + self.mode = mode + self.input_size = input_size + self.hidden_size = hidden_size + self.num_layers = num_layers + self.bias = bias + self.batch_first = batch_first + self.dropout = float(dropout) + self.bidirectional = bidirectional + self.dtype = dtype + self.version = 2 + self.training = False + num_directions = 2 if bidirectional else 1 + + # "type: ignore" is required since ints and Numbers are not fully comparable + # https://github.com/python/mypy/issues/8566 + if not isinstance(dropout, numbers.Number) \ + or not 0 <= 
dropout <= 1 or isinstance(dropout, bool): # type: ignore[operator] + raise ValueError("dropout should be a number in range [0, 1] " + "representing the probability of an element being " + "zeroed") + if dropout > 0 and num_layers == 1: # type: ignore[operator] + warnings.warn("dropout option adds dropout after all but last " + "recurrent layer, so non-zero dropout expects " + f"num_layers greater than 1, but got dropout={dropout} and " + f"num_layers={num_layers}") + + if mode == 'LSTM': + gate_size = 4 * hidden_size + elif mode == 'GRU': + gate_size = 3 * hidden_size + else: + raise ValueError("Unrecognized RNN mode: " + mode) + + _all_weight_values = [] + for layer in range(num_layers): + for direction in range(num_directions): + layer_input_size = input_size if layer == 0 else hidden_size * num_directions + + w_ih = torch.randn(gate_size, layer_input_size).to(torch.float) + w_hh = torch.randn(gate_size, hidden_size).to(torch.float) + b_ih = torch.randn(gate_size).to(torch.float) + b_hh = torch.randn(gate_size).to(torch.float) + if dtype == torch.qint8: + w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8) + w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8) + packed_ih = \ + torch.ops.quantized.linear_prepack(w_ih, b_ih) + packed_hh = \ + torch.ops.quantized.linear_prepack(w_hh, b_hh) + if self.version is None or self.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh, True) + else: + packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh) + cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + + _all_weight_values.append(PackedParameter(cell_params)) + self._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + def _get_name(self): + return 'DynamicQuantizedRNN' + + def extra_repr(self): + s = '{input_size}, {hidden_size}' + if self.num_layers != 1: + s += ', num_layers={num_layers}' + if self.bias is not True: + s += ', bias={bias}' + if self.batch_first is not False: + s += ', batch_first={batch_first}' + if self.dropout != 0: + s += ', dropout={dropout}' + if self.bidirectional is not False: + s += ', bidirectional={bidirectional}' + return s.format(**self.__dict__) + + def __repr__(self): + # We don't want to show `ModuleList` children, hence custom + # `__repr__`. This is the same as nn.Module.__repr__, except the check + # for the `PackedParameter` and `nn.ModuleList`. + # You should still override `extra_repr` to add more info. 
+ extra_lines = [] + extra_repr = self.extra_repr() + # empty string will be split into list [''] + if extra_repr: + extra_lines = extra_repr.split('\n') + child_lines = [] + for key, module in self._modules.items(): + if isinstance(module, (PackedParameter, nn.ModuleList)): + continue + mod_str = repr(module) + mod_str = nn.modules.module._addindent(mod_str, 2) + child_lines.append('(' + key + '): ' + mod_str) + lines = extra_lines + child_lines + + main_str = self._get_name() + '(' + if lines: + # simple one-liner info, which most builtin Modules will use + if len(extra_lines) == 1 and not child_lines: + main_str += extra_lines[0] + else: + main_str += '\n ' + '\n '.join(lines) + '\n' + + main_str += ')' + return main_str + + def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: + expected_input_dim = 2 if batch_sizes is not None else 3 + if input.dim() != expected_input_dim: + raise RuntimeError( + f'input must have {expected_input_dim} dimensions, got {input.dim()}') + if self.input_size != input.size(-1): + raise RuntimeError( + f'input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}') + + def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: + if batch_sizes is not None: + mini_batch = int(batch_sizes[0]) + else: + mini_batch = input.size(0) if self.batch_first else input.size(1) + num_directions = 2 if self.bidirectional else 1 + expected_hidden_size = (self.num_layers * num_directions, + mini_batch, self.hidden_size) + return expected_hidden_size + + def check_hidden_size( + self, hx: Tensor, expected_hidden_size: Tuple[int, int, int], + msg: str = 'Expected hidden size {}, got {}' + ) -> None: + if hx.size() != expected_hidden_size: + raise RuntimeError(msg.format( + expected_hidden_size, list(hx.size()))) + + def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + self.check_hidden_size(hidden, expected_hidden_size, + msg='Expected hidden size {}, got {}') + + def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor: + if permutation is None: + return hx + return _apply_permutation(hx, permutation) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + self.version = version + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + def set_weight_bias(self, weight_bias_dict): + + def weight_bias_name(ihhh, layer, suffix): + weight_name = f"weight_{ihhh}_l{layer}{suffix}" + bias_name = f"bias_{ihhh}_l{layer}{suffix}" + return weight_name, bias_name + + num_directions = 2 if self.bidirectional else 1 + # TODO: dedup with __init__ of RNNBase + _all_weight_values = [] + for layer in range(self.num_layers): + for direction in range(num_directions): + suffix = "_reverse" if direction == 1 else "" + w_ih_name, b_ih_name = weight_bias_name("ih", layer, suffix) + w_hh_name, b_hh_name = weight_bias_name("hh", layer, suffix) + w_ih = weight_bias_dict[w_ih_name] + b_ih = weight_bias_dict[b_ih_name] + w_hh = weight_bias_dict[w_hh_name] + b_hh = weight_bias_dict[b_hh_name] + if w_ih.dtype == torch.qint8: + packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack(w_hh, 
b_hh) + if self.version is None or self.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, b_ih, b_hh, True) + else: + packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh) + cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + + _all_weight_values.append(PackedParameter(cell_params)) + self._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + assert type(mod) in {torch.nn.LSTM, + torch.nn.GRU}, 'nn.quantized.dynamic.RNNBase.from_float only works for nn.LSTM and nn.GRU' + assert hasattr( + mod, + 'qconfig' + ), 'Input float module must have qconfig defined' + + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer_method = mod.qconfig.weight + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. + from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer_method = default_dynamic_qconfig.weight + + dtype = weight_observer_method().dtype + supported_scalar_types = [torch.qint8, torch.float16] + if dtype not in supported_scalar_types: + raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}') + # RNNBase can be either LSTM or GRU + qRNNBase: Union[LSTM, GRU] + if mod.mode == 'LSTM': + qRNNBase = LSTM(mod.input_size, mod.hidden_size, mod.num_layers, + mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype) + elif mod.mode == 'GRU': + qRNNBase = GRU(mod.input_size, mod.hidden_size, mod.num_layers, + mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype) + else: + raise NotImplementedError('Only LSTM/GRU is supported for QuantizedRNN for now') + + num_directions = 2 if mod.bidirectional else 1 + + assert mod.bias + + _all_weight_values = [] + for layer in range(qRNNBase.num_layers): + for direction in range(num_directions): + suffix = '_reverse' if direction == 1 else '' + + def retrieve_weight_bias(ihhh): + weight_name = f'weight_{ihhh}_l{layer}{suffix}' + bias_name = f'bias_{ihhh}_l{layer}{suffix}' + weight = getattr(mod, weight_name) + bias = getattr(mod, bias_name) + return weight, bias + + weight_ih, bias_ih = retrieve_weight_bias('ih') + weight_hh, bias_hh = retrieve_weight_bias('hh') + + if dtype == torch.qint8: + def quantize_and_pack(w, b): + weight_observer = weight_observer_method() + weight_observer(w) + qweight = _quantize_weight(w.float(), weight_observer) + packed_weight = \ + torch.ops.quantized.linear_prepack(qweight, b) + return packed_weight + packed_ih = quantize_and_pack(weight_ih, bias_ih) + packed_hh = quantize_and_pack(weight_hh, bias_hh) + if qRNNBase.version is None or qRNNBase.version < 2: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, bias_ih, bias_hh) + else: + cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic( + packed_ih, packed_hh, bias_ih, bias_hh, True) + + elif dtype == torch.float16: + packed_ih = torch.ops.quantized.linear_prepack_fp16( + weight_ih.float(), bias_ih) + packed_hh = torch.ops.quantized.linear_prepack_fp16( + weight_hh.float(), bias_hh) + + 
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( + packed_ih, packed_hh) + else: + raise RuntimeError('Unsupported dtype specified for dynamic quantized LSTM!') + + _all_weight_values.append(PackedParameter(cell_params)) + qRNNBase._all_weight_values = torch.nn.ModuleList(_all_weight_values) + + return qRNNBase + + def _weight_bias(self): + # Returns a dict of weights and biases + weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}} + count = 0 + num_directions = 2 if self.bidirectional else 1 + for layer in range(self.num_layers): + for direction in range(num_directions): + suffix = '_reverse' if direction == 1 else '' + key_name1 = f'weight_ih_l{layer}{suffix}' + key_name2 = f'weight_hh_l{layer}{suffix}' + # packed weights are part of torchbind class, CellParamsSerializationType + # Within the packed weight class, the weight and bias are accessible as Tensors + packed_weight_bias = self._all_weight_values[count].param.__getstate__()[0][4] + weight_bias_dict['weight'][key_name1] = packed_weight_bias[0].__getstate__()[0][0] + weight_bias_dict['weight'][key_name2] = packed_weight_bias[1].__getstate__()[0][0] + key_name1 = f'bias_ih_l{layer}{suffix}' + key_name2 = f'bias_hh_l{layer}{suffix}' + weight_bias_dict['bias'][key_name1] = packed_weight_bias[0].__getstate__()[0][1] + weight_bias_dict['bias'][key_name2] = packed_weight_bias[1].__getstate__()[0][1] + count = count + 1 + return weight_bias_dict + + def get_weight(self): + return self._weight_bias()['weight'] + + def get_bias(self): + return self._weight_bias()['bias'] + + +class LSTM(RNNBase): + r""" + A dynamic quantized LSTM module with floating point tensor as inputs and outputs. + We adopt the same interface as `torch.nn.LSTM`, please see + https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.LSTM(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> c0 = torch.randn(2, 3, 20) + >>> output, (hn, cn) = rnn(input, (h0, c0)) + """ + _FLOAT_MODULE = nn.LSTM + + __overloads__ = {'forward': ['forward_packed', 'forward_tensor']} + + def __init__(self, *args, **kwargs): + super().__init__('LSTM', *args, **kwargs) + + def _get_name(self): + return 'DynamicQuantizedLSTM' + + def forward_impl( + self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], + batch_sizes: Optional[Tensor], max_batch_size: int, + sorted_indices: Optional[Tensor] + ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: + if hx is None: + num_directions = 2 if self.bidirectional else 1 + zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
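In practice these modules are rarely constructed by hand; `from_float` above is invoked for you by the dynamic quantization workflow. A minimal sketch (the `Tagger` wrapper and all tensor shapes are illustrative, not from the original file):

    import torch
    import torch.nn as nn

    class Tagger(nn.Module):
        def __init__(self):
            super().__init__()
            self.lstm = nn.LSTM(10, 20, num_layers=2)

        def forward(self, x):
            out, _ = self.lstm(x)
            return out

    # Swaps nn.LSTM for DynamicQuantizedLSTM via RNNBase.from_float above.
    model = torch.ao.quantization.quantize_dynamic(
        Tagger(), {nn.LSTM}, dtype=torch.qint8)

    x = torch.randn(5, 3, 10)  # (seq_len, batch, input_size); float in, float out
    out = model(x)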
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + + _all_params = ([m.param for m in self._all_weight_values]) + if batch_sizes is None: + result = torch.quantized_lstm(input, hx, _all_params, self.bias, self.num_layers, + float(self.dropout), self.training, self.bidirectional, + self.batch_first, dtype=self.dtype, use_dynamic=True) + else: + result = torch.quantized_lstm(input, batch_sizes, hx, _all_params, self.bias, + self.num_layers, float(self.dropout), self.training, + self.bidirectional, dtype=self.dtype, use_dynamic=True) + output = result[0] + hidden = result[1:] + + return output, hidden + + @torch.jit.export + def forward_tensor( + self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None + ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: + batch_sizes = None + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + output, hidden = self.forward_impl( + input, hx, batch_sizes, max_batch_size, sorted_indices) + + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch.jit.export + def forward_packed( + self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None + ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]: + input_, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + + output_, hidden = self.forward_impl( + input_, hx, batch_sizes, max_batch_size, sorted_indices + ) + + output = PackedSequence(output_, batch_sizes, + sorted_indices, unsorted_indices) + return output, self.permute_hidden(hidden, unsorted_indices) + + # "type: ignore" is required due to issue #43072 + def permute_hidden( # type: ignore[override] + self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if permutation is None: + return hx + return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation) + + # "type: ignore" is required due to issue #43072 + def check_forward_args( # type: ignore[override] + self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor] + ) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + + self.check_hidden_size(hidden[0], expected_hidden_size, + 'Expected hidden[0] size {}, got {}') + self.check_hidden_size(hidden[1], expected_hidden_size, + 'Expected hidden[1] size {}, got {}') + + @torch.jit.ignore + def forward(self, input, hx=None): + if isinstance(input, PackedSequence): + return self.forward_packed(input, hx) + else: + return self.forward_tensor(input, hx) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 " + "exists in LSTM, may need to relax the assumption to support the use case" + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.num_layers, + ref_mod.bias, + ref_mod.batch_first, + ref_mod.dropout, + ref_mod.bidirectional, + # assuming there is layer 0, which should be OK + ref_mod.weight_ih_l0_dtype, + ) + qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict()) + return qmod + + +class GRU(RNNBase): + r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence. 
+ + + For each element in the input sequence, each layer computes the following + function: + + .. math:: + \begin{array}{ll} + r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\ + h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)} + \end{array} + + where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input + at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer + at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`, + :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively. + :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product. + + In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer + (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by + dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random + variable which is :math:`0` with probability :attr:`dropout`. + + Args: + input_size: The number of expected features in the input `x` + hidden_size: The number of features in the hidden state `h` + num_layers: Number of recurrent layers. E.g., setting ``num_layers=2`` + would mean stacking two GRUs together to form a `stacked GRU`, + with the second GRU taking in outputs of the first GRU and + computing the final results. Default: 1 + bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`. + Default: ``True`` + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` + dropout: If non-zero, introduces a `Dropout` layer on the outputs of each + GRU layer except the last layer, with dropout probability equal to + :attr:`dropout`. Default: 0 + bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False`` + + Inputs: input, h_0 + - **input** of shape `(seq_len, batch, input_size)`: tensor containing the features + of the input sequence. The input can also be a packed variable length + sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` + for details. + - **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the initial hidden state for each element in the batch. + Defaults to zero if not provided. If the RNN is bidirectional, + num_directions should be 2, else it should be 1. + + Outputs: output, h_n + - **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor + containing the output features h_t from the last layer of the GRU, + for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been + given as the input, the output will also be a packed sequence. + For the unpacked case, the directions can be separated + using ``output.view(seq_len, batch, num_directions, hidden_size)``, + with forward and backward being direction `0` and `1` respectively. + + Similarly, the directions can be separated in the packed case. + - **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor + containing the hidden state for `t = seq_len` + + Like *output*, the layers can be separated using + ``h_n.view(num_layers, num_directions, batch, hidden_size)``. + + Shape: + - Input1: :math:`(L, N, H_{in})` tensor containing input features where + :math:`H_{in}=\text{input\_size}` and `L` represents a sequence length. 
+ - Input2: :math:`(S, N, H_{out})` tensor + containing the initial hidden state for each element in the batch. + :math:`H_{out}=\text{hidden\_size}` + Defaults to zero if not provided. where :math:`S=\text{num\_layers} * \text{num\_directions}` + If the RNN is bidirectional, num_directions should be 2, else it should be 1. + - Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}` + - Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state + for each element in the batch + + Attributes: + weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer + (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`. + Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)` + weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer + (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)` + bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer + (b_ir|b_iz|b_in), of shape `(3*hidden_size)` + bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer + (b_hr|b_hz|b_hn), of shape `(3*hidden_size)` + + .. note:: + All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` + where :math:`k = \frac{1}{\text{hidden\_size}}` + + .. note:: + The calculation of new gate :math:`n_t` subtly differs from the original paper and other frameworks. + In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the + previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix + `W` and addition of bias: + + .. math:: + \begin{aligned} + n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn}) + \end{aligned} + + This is in contrast to PyTorch implementation, which is done after :math:`W_{hn} h_{(t-1)}` + + .. math:: + \begin{aligned} + n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) + \end{aligned} + + This implementation differs on purpose for efficiency. + + .. include:: ../cudnn_persistent_rnn.rst + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.GRU(10, 20, 2) + >>> input = torch.randn(5, 3, 10) + >>> h0 = torch.randn(2, 3, 20) + >>> output, hn = rnn(input, h0) + """ + _FLOAT_MODULE = nn.GRU + + __overloads__ = {'forward': ['forward_packed', 'forward_tensor']} + + def __init__(self, *args, **kwargs): + super().__init__('GRU', *args, **kwargs) + + def _get_name(self): + return 'DynamicQuantizedGRU' + + def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None: + self.check_input(input, batch_sizes) + expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes) + + self.check_hidden_size(hidden, expected_hidden_size, + 'Expected hidden size {}, got {}') + + def forward_impl( + self, input: Tensor, hx: Optional[Tensor], + batch_sizes: Optional[Tensor], max_batch_size: int, + sorted_indices: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if hx is None: + num_directions = 2 if self.bidirectional else 1 + zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = zeros + else: + # Each batch of the hidden state should match the input sequence that + # the user believes he/she is passing in. 
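As with the LSTM, the dynamic GRU accepts either a plain tensor or a `PackedSequence`, dispatching to `forward_tensor` or `forward_packed`. A sketch with variable-length input, here exercising the fp16 weight path (the `Encoder` wrapper, shapes, and lengths are illustrative):

    import torch
    import torch.nn as nn
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    class Encoder(nn.Module):
        def __init__(self):
            super().__init__()
            self.gru = nn.GRU(10, 20)

        def forward(self, packed):
            return self.gru(packed)

    model = torch.ao.quantization.quantize_dynamic(
        Encoder(), {nn.GRU}, dtype=torch.float16)

    # Two sequences of lengths 5 and 3, zero-padded to length 5.
    x = torch.randn(5, 2, 10)
    packed = pack_padded_sequence(x, lengths=torch.tensor([5, 3]))
    out_packed, h_n = model(packed)          # routed through forward_packed
    out, out_lengths = pad_packed_sequence(out_packed)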
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + + _all_params = ([m.param for m in self._all_weight_values]) + if batch_sizes is None: + result = torch.quantized_gru(input, + hx, + _all_params, + self.bias, + self.num_layers, + self.dropout, + self.training, + self.bidirectional, + self.batch_first) + else: + result = torch.quantized_gru(input, + batch_sizes, + hx, + _all_params, + self.bias, + self.num_layers, + self.dropout, + self.training, + self.bidirectional) + output = result[0] + hidden = result[1] + + return output, hidden + + + @torch.jit.export + def forward_tensor( + self, input: Tensor, hx: Optional[Tensor] = None + ) -> Tuple[Tensor, Tensor]: + batch_sizes = None + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + output, hidden = self.forward_impl( + input, hx, batch_sizes, max_batch_size, sorted_indices) + + return output, self.permute_hidden(hidden, unsorted_indices) + + @torch.jit.export + def forward_packed( + self, input: PackedSequence, hx: Optional[Tensor] = None + ) -> Tuple[PackedSequence, Tensor]: + input_, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + output_, hidden = self.forward_impl( + input_, hx, batch_sizes, max_batch_size, sorted_indices + ) + + output = PackedSequence(output_, batch_sizes, + sorted_indices, unsorted_indices) + return output, self.permute_hidden(hidden, unsorted_indices) + + def permute_hidden( + self, hx: Tensor, permutation: Optional[Tensor] + ) -> Tensor: + if permutation is None: + return hx + return _apply_permutation(hx, permutation) + + @torch.jit.ignore + def forward(self, input, hx=None): + if isinstance(input, PackedSequence): + return self.forward_packed(input, hx) + else: + return self.forward_tensor(input, hx) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 " + "exists in LSTM, may need to relax the assumption to support the use case" + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.num_layers, + ref_mod.bias, + ref_mod.batch_first, + ref_mod.dropout, + ref_mod.bidirectional, + # assuming there is layer 0, which should be OK + ref_mod.weight_ih_l0_dtype, + ) + qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict()) + return qmod + +class RNNCellBase(torch.nn.Module): + # _FLOAT_MODULE = nn.CellRNNBase + __constants__ = ['input_size', 'hidden_size', 'bias'] + + def __init__(self, input_size, hidden_size, bias=True, num_chunks=4, dtype=torch.qint8): + super().__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.bias = bias + self.weight_dtype = dtype + if bias: + self.bias_ih = torch.randn(num_chunks * hidden_size).to(dtype=torch.float) + self.bias_hh = torch.randn(num_chunks * hidden_size).to(dtype=torch.float) + else: + self.register_parameter('bias_ih', None) + self.register_parameter('bias_hh', None) + + weight_ih = torch.randn(num_chunks * hidden_size, input_size).to(torch.float) + weight_hh = torch.randn(num_chunks * hidden_size, hidden_size).to(torch.float) + if dtype == torch.qint8: + weight_ih = torch.quantize_per_tensor(weight_ih, scale=1, zero_point=0, dtype=torch.qint8) + weight_hh = torch.quantize_per_tensor(weight_hh, scale=1, zero_point=0, 
dtype=torch.qint8) + + if dtype == torch.qint8: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # w_ih, w_hh + packed_weight_ih = \ + torch.ops.quantized.linear_prepack(weight_ih, self.bias_ih) + packed_weight_hh = \ + torch.ops.quantized.linear_prepack(weight_hh, self.bias_hh) + else: + # for each layer, for each direction we need to quantize and pack + # weights and pack parameters in this order: + # + # packed_ih, packed_hh, b_ih, b_hh + packed_weight_ih = torch.ops.quantized.linear_prepack_fp16( + weight_ih, self.bias_ih) + packed_weight_hh = torch.ops.quantized.linear_prepack_fp16( + weight_hh, self.bias_hh) + + self._packed_weight_ih = packed_weight_ih + self._packed_weight_hh = packed_weight_hh + + def _get_name(self): + return 'DynamicQuantizedRNNBase' + + def extra_repr(self): + s = '{input_size}, {hidden_size}' + if 'bias' in self.__dict__ and self.bias is not True: + s += ', bias={bias}' + if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": + s += ', nonlinearity={nonlinearity}' + return s.format(**self.__dict__) + + def check_forward_input(self, input): + if input.size(1) != self.input_size: + raise RuntimeError( + f"input has inconsistent input_size: got {input.size(1)}, expected {self.input_size}") + + def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None: + if input.size(0) != hx.size(0): + raise RuntimeError( + f"Input batch size {input.size(0)} doesn't match hidden{hidden_label} batch size {hx.size(0)}") + + if hx.size(1) != self.hidden_size: + raise RuntimeError( + f"hidden{hidden_label} has inconsistent hidden_size: got {hx.size(1)}, expected {self.hidden_size}") + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + assert type(mod) in {torch.nn.LSTMCell, + torch.nn.GRUCell, + torch.nn.RNNCell}, 'nn.quantized.dynamic.RNNCellBase.from_float \ + only works for nn.LSTMCell, nn.GRUCell and nn.RNNCell' + assert hasattr( + mod, 'qconfig'), 'Input float module must have qconfig defined' + + if mod.qconfig is not None and mod.qconfig.weight is not None: + weight_observer_method = mod.qconfig.weight + else: + # We have the circular import issues if we import the qconfig in the beginning of this file: + # https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the + # import until we need it. 
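The cell variants follow the same `from_float` recipe. A minimal sketch of dynamically quantizing a single LSTMCell and driving it step by step (the `StepDecoder` wrapper and shapes are illustrative):

    import torch
    import torch.nn as nn

    class StepDecoder(nn.Module):
        def __init__(self):
            super().__init__()
            self.cell = nn.LSTMCell(10, 20)

        def forward(self, x, state):
            return self.cell(x, state)

    model = torch.ao.quantization.quantize_dynamic(
        StepDecoder(), {nn.LSTMCell}, dtype=torch.qint8)

    x = torch.randn(3, 10)                            # (batch, input_size)
    state = (torch.zeros(3, 20), torch.zeros(3, 20))  # (h_0, c_0)
    for _ in range(6):                                # one time step per call
        state = model(x, state)
    h_n, c_n = state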
+ from torch.ao.quantization.qconfig import default_dynamic_qconfig + weight_observer_method = default_dynamic_qconfig.weight + + dtype = weight_observer_method().dtype + supported_scalar_types = [torch.qint8, torch.float16] + if dtype not in supported_scalar_types: + raise RuntimeError(f'Unsupported dtype for dynamic RNN quantization: {dtype}') + + qRNNCellBase: Union[LSTMCell, GRUCell, RNNCell] + + if type(mod) == torch.nn.LSTMCell: + qRNNCellBase = LSTMCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype) + elif type(mod) == torch.nn.GRUCell: + qRNNCellBase = GRUCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype) + elif type(mod) == torch.nn.RNNCell: + qRNNCellBase = RNNCell(mod.input_size, mod.hidden_size, bias=mod.bias, nonlinearity=mod.nonlinearity, dtype=dtype) + else: + raise NotImplementedError('Only LSTMCell, GRUCell and RNNCell \ + are supported for QuantizedRNN for now') + + assert mod.bias + + def _observe_and_quantize_weight(weight): + if dtype == torch.qint8: + weight_observer = weight_observer_method() + weight_observer(weight) + qweight = _quantize_weight(weight.float(), weight_observer) + return qweight + else: + return weight.float() + + qRNNCellBase._packed_weight_ih = pack_weight_bias(_observe_and_quantize_weight(mod.weight_ih), mod.bias_ih, dtype) + qRNNCellBase._packed_weight_hh = pack_weight_bias(_observe_and_quantize_weight(mod.weight_hh), mod.bias_hh, dtype) + return qRNNCellBase + + @classmethod + def from_reference(cls, ref_mod): + assert hasattr(ref_mod, "weight_ih_dtype"), "We are assuming weight_ih " + "exists in reference module, may need to relax the assumption to support the use case" + if hasattr(ref_mod, "nonlinearity"): + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.bias, + ref_mod.nonlinearity, + dtype=ref_mod.weight_ih_dtype + ) + else: + qmod = cls( + ref_mod.input_size, + ref_mod.hidden_size, + ref_mod.bias, + dtype=ref_mod.weight_ih_dtype + ) + weight_bias_dict = { + "weight": { + "weight_ih": ref_mod.get_quantized_weight_ih(), + "weight_hh": ref_mod.get_quantized_weight_hh(), + }, + "bias": { + "bias_ih": ref_mod.bias_ih, + "bias_hh": ref_mod.bias_hh, + } + } + qmod.set_weight_bias(weight_bias_dict) + return qmod + + def _weight_bias(self): + # Returns a dict of weights and biases + weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}} + w1, b1 = self._packed_weight_ih.__getstate__()[0] + w2, b2 = self._packed_weight_hh.__getstate__()[0] + # TODO: these can be simplified to one level? e.g. using weight_ih as key + # directly + weight_bias_dict['weight']['weight_ih'] = w1 + weight_bias_dict['weight']['weight_hh'] = w2 + weight_bias_dict['bias']['bias_ih'] = b1 + weight_bias_dict['bias']['bias_hh'] = b2 + return weight_bias_dict + + def get_weight(self): + return self._weight_bias()['weight'] + + def get_bias(self): + return self._weight_bias()['bias'] + + def set_weight_bias(self, weight_bias_dict): + # TODO: these can be simplified to one level? e.g. 
using weight_ih as key + # directly + self._packed_weight_ih = pack_weight_bias( + weight_bias_dict["weight"]["weight_ih"], + weight_bias_dict["bias"]["bias_ih"], + self.weight_dtype) + self._packed_weight_hh = pack_weight_bias( + weight_bias_dict["weight"]["weight_hh"], + weight_bias_dict["bias"]["bias_hh"], + self.weight_dtype) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + '_packed_weight_ih'] = self._packed_weight_ih + destination[prefix + '_packed_weight_hh'] = self._packed_weight_hh + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self._packed_weight_ih = state_dict.pop(prefix + '_packed_weight_ih') + self._packed_weight_hh = state_dict.pop(prefix + '_packed_weight_hh') + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + +class RNNCell(RNNCellBase): + r"""An Elman RNN cell with tanh or ReLU non-linearity. + A dynamic quantized RNNCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.RNNCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.RNNCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.RNNCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx = rnn(input[i], hx) + ... output.append(hx) + """ + __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity'] + + def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh", dtype=torch.qint8): + super().__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype) + self.nonlinearity = nonlinearity + + def _get_name(self): + return 'DynamicQuantizedRNNCell' + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + if self.nonlinearity == "tanh": + ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + elif self.nonlinearity == "relu": + ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + else: + ret = input # TODO: remove when jit supports exception flow + raise RuntimeError( + f"Unknown nonlinearity: {self.nonlinearity}") + return ret + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + +class LSTMCell(RNNCellBase): + r"""A long short-term memory (LSTM) cell. + + A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.LSTMCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.LSTMCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> cx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx, cx = rnn(input[i], (hx, cx)) + ... 
output.append(hx) + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, num_chunks=4, **kwargs) # type: ignore[misc] + + def _get_name(self): + return 'DynamicQuantizedLSTMCell' + + def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: + self.check_forward_input(input) + if hx is None: + zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + self.check_forward_hidden(input, hx[0], '[0]') + self.check_forward_hidden(input, hx[1], '[1]') + return torch.ops.quantized.quantized_lstm_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + + +class GRUCell(RNNCellBase): + r"""A gated recurrent unit (GRU) cell + + A dynamic quantized GRUCell module with floating point tensor as inputs and outputs. + Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.GRUCell`, + please see https://pytorch.org/docs/stable/nn.html#torch.nn.GRUCell for documentation. + + Examples:: + + >>> # xdoctest: +SKIP + >>> rnn = nn.GRUCell(10, 20) + >>> input = torch.randn(6, 3, 10) + >>> hx = torch.randn(3, 20) + >>> output = [] + >>> for i in range(6): + ... hx = rnn(input[i], hx) + ... output.append(hx) + """ + + def __init__(self, input_size, hidden_size, bias=True, dtype=torch.qint8): + super().__init__(input_size, hidden_size, bias, num_chunks=3, dtype=dtype) + + def _get_name(self): + return 'DynamicQuantizedGRUCell' + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + self.check_forward_input(input) + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + self.check_forward_hidden(input, hx, '') + return torch.ops.quantized.quantized_gru_cell_dynamic( + input, hx, + self._packed_weight_ih, self._packed_weight_hh, + self.bias_ih, self.bias_hh, + ) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8104a22b2662e03c20055c3993c017e3e109782 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..321013735e44aa2d6952d1d4bf7853be26b3ec1b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e85c78f86f4085adf2fc4de7636aadf38f1eff56 Binary files 
/dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad44821b3702196da4d087e42e86cf3fed6a08fe Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/functional_modules.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..626da1946f6c8374af9d62fc4e7ac8944e302617 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fda342bebb0deef1af6e5423bfbf6315deb0ff0 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/modules/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..081cb17130b3765c6f6f57f0b8ec1ba901e21312 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__init__.py @@ -0,0 +1,18 @@ +from .modules import * # noqa: F403 + +__all__ = [ + 'Linear', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', + 'RNNCell', + 'LSTMCell', + 'GRUCell', + 'LSTM', + 'GRU', + 'Embedding', + 'EmbeddingBag', +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..832185eb334cac8d033bb2f22924f2a72f091882 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..884720774c5f83c9122cf4de43a4265c9c6afb59 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/__init__.py @@ -0,0 +1,21 @@ +from .linear import Linear +from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d +from .rnn import RNNCell, LSTMCell, GRUCell, LSTM, GRU +from .sparse import Embedding, EmbeddingBag + +__all__ = [ + 'Linear', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', + 'RNNCell', + 'LSTMCell', + 'GRUCell', + 'LSTM', + 'GRU', + 'Embedding', + 'EmbeddingBag', +] diff 
--git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c285bc7f67a4d2066c71837dd4b1ad4bc902f5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/conv.py @@ -0,0 +1,319 @@ +# mypy: allow-untyped-defs +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional, Dict, Any, List +from torch.nn.common_types import _size_1_t +from .utils import ReferenceQuantizedModule + +__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'] + +class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule): + """ A reference version of nn.quantized.Conv2d + we will not pack the parameters in this module, since weight packing is an + optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), + this is useful when user want to use this module in other backends like Glow. + """ + __annotations__ = {"bias": Optional[torch.Tensor]} + _IS_REFERENCE = True + + @staticmethod + def from_float(cls, float_conv, weight_qparams): + qref_conv = cls( + float_conv.in_channels, + float_conv.out_channels, + float_conv.kernel_size, # type: ignore[arg-type] + float_conv.stride, # type: ignore[arg-type] + float_conv.padding, # type: ignore[arg-type] + float_conv.dilation, # type: ignore[arg-type] + float_conv.groups, + float_conv.bias is not None, # type: ignore[arg-type] + float_conv.padding_mode, + device=float_conv.weight.device, + dtype=float_conv.weight.dtype, + weight_qparams=weight_qparams) + qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach()) + if float_conv.bias is not None: + qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach()) + return qref_conv + +class Conv1d(_ConvNd, nn.Conv1d): + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: _size_1_t, + stride: _size_1_t = 1, + padding: _size_1_t = 0, + dilation: _size_1_t = 1, + groups: int = 1, + bias: bool = True, + padding_mode: str = "zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv1d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv1d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv1d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv1d + """ + weight_quant_dequant = self.get_weight() + result = F.conv1d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv1d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class Conv2d(_ConvNd, nn.Conv2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode='zeros', + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv2d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, 
padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv2d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv2d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv2d + """ + weight_quant_dequant = self.get_weight() + result = F.conv2d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv2d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class Conv3d(_ConvNd, nn.Conv3d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True, + padding_mode="zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.Conv3d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, dilation, + groups, bias, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.conv3d --- + + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.conv3d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv3d + """ + weight_quant_dequant = self.get_weight() + result = F.conv3d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, self.dilation, self.groups) + return result + + def _get_name(self): + return "QuantizedConv3d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvNd.from_float(cls, float_conv, weight_qparams) + +class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd): + """ A reference version of nn.quantized.ConvTranspose2d + we will not pack the parameters in this module, since weight packing is an + optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), + this is useful when user want to use this module in other backends like Glow. 
+ """ + @staticmethod + def from_float(cls, float_conv, weight_qparams): + qref_conv = cls( + float_conv.in_channels, + float_conv.out_channels, + float_conv.kernel_size, # type: ignore[arg-type] + float_conv.stride, # type: ignore[arg-type] + float_conv.padding, # type: ignore[arg-type] + float_conv.output_padding, # type: ignore[arg-type] + float_conv.groups, + float_conv.bias is not None, # type: ignore[arg-type] + float_conv.dilation, # type: ignore[arg-type] + float_conv.padding_mode, + device=float_conv.weight.device, + dtype=float_conv.weight.dtype, + weight_qparams=weight_qparams) + qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach()) + if float_conv.bias is not None: + qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach()) + return qref_conv + + +class ConvTranspose1d(_ConvTransposeNd, nn.ConvTranspose1d): + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: _size_1_t, + stride: _size_1_t = 1, + padding: _size_1_t = 0, + output_padding: _size_1_t = 0, + groups: int = 1, + bias: bool = True, + dilation: _size_1_t = 1, + padding_mode: str = "zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.ConvTranspose1d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose1d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose1d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv1d + """ + + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`. 
+ output_padding = self._output_padding( + x, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose1d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + return result + + def _get_name(self): + return "QuantizedConvTranspose1d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) + +class ConvTranspose2d(_ConvTransposeNd, nn.ConvTranspose2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, + groups=1, bias=True, dilation=1, + padding_mode='zeros', + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + + nn.ConvTranspose2d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose2d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv2d + """ + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`. + + output_padding = self._output_padding( + x, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose2d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + + return result + + def _get_name(self): + return "QuantizedConvTranspose2d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) + +class ConvTranspose3d(_ConvTransposeNd, nn.ConvTranspose3d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, output_padding=0, + groups=1, bias=True, dilation=1, + padding_mode="zeros", + device=None, + dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None): + nn.ConvTranspose3d.__init__( + self, in_channels, out_channels, kernel_size, stride, padding, output_padding, + groups, bias, dilation, padding_mode, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor: + """ + we have: + w(float) -- quant - dequant \ + x(float) ------------- F.convTranspose3d --- + In the full model, we will see + w(float) -- quant - *dequant \ + x -- quant --- *dequant -- *F.convTranspose3d --- *quant - dequant + and the backend should be able to fuse the ops with `*` into a quantized conv3d + """ + + assert isinstance(self.padding, tuple) + # One cannot replace List by Tuple or Sequence in "_output_padding" because + # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
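The `_output_padding` calls above exist because with stride > 1 the conv output size is not invertible: several input sizes collapse to the same output size, and `output_size` (or `output_padding`) picks one back out. A small illustration with the plain float modules:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(1, 1, kernel_size=3, stride=2)
    a = torch.randn(1, 1, 7, 7)
    b = torch.randn(1, 1, 8, 8)
    print(conv(a).shape, conv(b).shape)  # both torch.Size([1, 1, 3, 3])

    deconv = nn.ConvTranspose2d(1, 1, kernel_size=3, stride=2)
    y = torch.randn(1, 1, 3, 3)
    print(deconv(y).shape)                      # torch.Size([1, 1, 7, 7])
    print(deconv(y, output_size=[8, 8]).shape)  # torch.Size([1, 1, 8, 8])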
+ output_padding = self._output_padding( + x, output_size, self.stride, self.padding, self.kernel_size, self.dilation) # type: ignore[arg-type] + + weight_quant_dequant = self.get_weight() + result = F.conv_transpose3d( + x, weight_quant_dequant, self.bias, self.stride, + self.padding, output_padding, self.groups, self.dilation) + return result + + def _get_name(self): + return "QuantizedConvTranspose3d(Reference)" + + @classmethod + def from_float(cls, float_conv, weight_qparams): + return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..f5a53d0ceb3e83378730a105774d8144dc1e7f13 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/rnn.py @@ -0,0 +1,615 @@ +# mypy: allow-untyped-defs +import torch +import torch.nn as nn +from torch import Tensor +from .utils import _quantize_and_dequantize_weight +from .utils import _quantize_weight +from typing import Optional, Dict, Any, Tuple +from torch import _VF +from torch.nn.utils.rnn import PackedSequence + +__all__ = ['RNNCellBase', 'RNNCell', 'LSTMCell', 'GRUCell', 'RNNBase', 'LSTM', 'GRU', 'get_quantized_weight'] + +def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: + return tensor.index_select(dim, permutation) + +def _get_weight_and_quantization_params(module, wn): + weight = getattr(module, wn) + params = [weight] + for param_name in [wn + n for n in ["_qscheme", "_dtype", "_scale", "_zero_point", "_axis_int"]]: + if hasattr(module, param_name): + param = getattr(module, param_name) + else: + param = None + params.append(param) + return params + +def get_quantized_weight(module, wn): + if not hasattr(module, wn): + return None + params = _get_weight_and_quantization_params(module, wn) + weight = _quantize_weight(*params) + return weight + +def _get_quantize_and_dequantized_weight(module, wn): + if not hasattr(module, wn): + return None + params = _get_weight_and_quantization_params(module, wn) + weight = _quantize_and_dequantize_weight(*params) + return weight + +class RNNCellBase(nn.RNNCellBase): + def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int, + device=None, dtype=None, weight_qparams_dict=None) -> None: + super().__init__(input_size, hidden_size, bias, num_chunks, device=device, dtype=dtype) + # TODO(jerryzh168): maybe make this arg a required arg + if weight_qparams_dict is None: + weight_qparams = { + "qscheme": torch.per_tensor_affine, + "dtype": torch.quint8, + "scale": 1.0, + "zero_point": 0 + } + weight_qparams_dict = { + "weight_ih": weight_qparams, + "weight_hh": weight_qparams, + "is_decomposed": False, + } + assert len(weight_qparams_dict) == 3, "Expected length for weight_qparams_dict to be 3 for QuantizedRNNCellBase(Reference)" + self._init_weight_qparams_dict(weight_qparams_dict, device) + + def _init_weight_qparams_dict(self, weight_qparams_dict, device): + assert weight_qparams_dict is not None + self.is_decomposed = weight_qparams_dict["is_decomposed"] + for key, weight_qparams in weight_qparams_dict.items(): + if key == "is_decomposed": + continue + # TODO: refactor the duplicated code to utils.py + weight_qscheme = weight_qparams["qscheme"] + weight_dtype = weight_qparams["dtype"] + setattr(self, key + "_qscheme", weight_qscheme) + setattr(self, key
+ "_dtype", weight_dtype) + assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \ + Exception(f"qscheme: {weight_qscheme} is not support in {self._get_name()}") + if weight_qscheme is not None: + scale = weight_qparams["scale"] + scale_tensor = scale.clone().detach() \ + if isinstance(scale, torch.Tensor) else \ + torch.tensor(scale, dtype=torch.float, device=device) + self.register_buffer(key + "_scale", scale_tensor) + zp = weight_qparams["zero_point"] + zp_tensor = zp.clone().detach() \ + if isinstance(zp, torch.Tensor) else \ + torch.tensor(zp, dtype=torch.int, device=device) + self.register_buffer(key + "_zero_point", zp_tensor) + if weight_qscheme == torch.per_channel_affine: + axis = weight_qparams["axis"] + axis_tensor = axis.clone().detach() \ + if isinstance(axis, torch.Tensor) else \ + torch.tensor(axis, dtype=torch.int, device=device) + self.register_buffer(key + "_axis", axis_tensor) + else: + # added for TorchScriptability, not used + self.register_buffer( + key + "_axis", torch.tensor(0, dtype=torch.int, device=device)) + setattr(self, key + "_axis_int", getattr(self, key + "_axis").item()) + + def _get_name(self): + return "QuantizedRNNCellBase(Reference)" + + def get_quantized_weight_ih(self): + return get_quantized_weight(self, "weight_ih") + + def get_quantized_weight_hh(self): + return get_quantized_weight(self, "weight_hh") + + def get_weight_ih(self): + return _get_quantize_and_dequantized_weight(self, "weight_ih") + + def get_weight_hh(self): + return _get_quantize_and_dequantized_weight(self, "weight_hh") + +class RNNCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh), + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. 
weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh", + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs) + self.nonlinearity = nonlinearity + + def _get_name(self): + return "QuantizedRNNCell(Reference)" + + # TODO: refactor nn.RNNCell to have a _forward that takes weight_ih and weight_hh as input + # and remove duplicated code, same for the other two Cell modules + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + assert input.dim() in (1, 2), \ + f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + else: + hx = hx.unsqueeze(0) if not is_batched else hx + + if self.nonlinearity == "tanh": + ret = _VF.rnn_tanh_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + elif self.nonlinearity == "relu": + ret = _VF.rnn_relu_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + else: + ret = input # TODO: remove when jit supports exception flow + raise RuntimeError( + f"Unknown nonlinearity: {self.nonlinearity}") + + if not is_batched: + ret = ret.squeeze(0) + + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.nonlinearity, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class LSTMCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh), + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. 
weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs) + + def _get_name(self): + return "QuantizedLSTMCell(Reference)" + + def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: + assert input.dim() in (1, 2), \ + f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + hx = (zeros, zeros) + else: + hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx + + ret = _VF.lstm_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + + if not is_batched: + ret = (ret[0].squeeze(0), ret[1].squeeze(0)) + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict, use_precomputed_fake_quant=False): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class GRUCell(RNNCellBase): + """ + We'll store weight_qparams for all the weights (weight_ih and weight_hh); + we need to pass in a `weight_qparams_dict` that maps from weight name, + e.g. weight_ih, to the weight_qparams for that weight + """ + def __init__(self, input_size: int, hidden_size: int, bias: bool = True, + device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict} + super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs) + + def _get_name(self): + return "QuantizedGRUCell(Reference)" + + def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: + assert input.dim() in (1, 2), \ + f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 2 + if not is_batched: + input = input.unsqueeze(0) + + if hx is None: + hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) + else: + hx = hx.unsqueeze(0) if not is_batched else hx + + ret = _VF.gru_cell( + input, hx, + self.get_weight_ih(), self.get_weight_hh(), + self.bias_ih, self.bias_hh, + ) + + if not is_batched: + ret = ret.squeeze(0) + + return ret + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.bias, + mod.weight_ih.device, + mod.weight_ih.dtype, + weight_qparams_dict) + ref_mod.weight_ih = mod.weight_ih + ref_mod.weight_hh = mod.weight_hh + ref_mod.bias_ih = mod.bias_ih + ref_mod.bias_hh = mod.bias_hh + return ref_mod + +class RNNBase(nn.RNNBase): + def __init__(self, mode: str, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, batch_first: bool = False, + dropout: float = 0., bidirectional: bool = False, proj_size: int = 0, + device=None, dtype=None, + weight_qparams_dict: Optional[Dict[str, Any]] = None) -> None: +
super().__init__( + mode, input_size, hidden_size, num_layers, bias, batch_first, dropout, + bidirectional, proj_size, device, dtype + ) + # TODO(jerryzh168): maybe make this arg a required arg + if weight_qparams_dict is None: + weight_qparams = { + 'qscheme': torch.per_tensor_affine, + 'dtype': torch.quint8, + 'scale': 1.0, + 'zero_point': 0 + } + weight_qparams_dict = {"is_decomposed": False} # type: ignore[dict-item] + for wn in self._flat_weights_names: + if wn.startswith("weight"): + weight_qparams_dict[wn] = weight_qparams + self._init_weight_qparams_dict(weight_qparams_dict, device) + + def _init_weight_qparams_dict(self, weight_qparams_dict, device): + self.is_decomposed = weight_qparams_dict["is_decomposed"] + for key, weight_qparams in weight_qparams_dict.items(): + if key == "is_decomposed": + continue + weight_qscheme = weight_qparams["qscheme"] + weight_dtype = weight_qparams["dtype"] + setattr(self, key + "_qscheme", weight_qscheme) + setattr(self, key + "_dtype", weight_dtype) + assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \ + Exception(f"qscheme: {weight_qscheme} is not supported in {self._get_name()}") + if weight_qscheme is not None: + self.register_buffer( + key + "_scale", + torch.tensor(weight_qparams["scale"], dtype=torch.float, device=device)) + self.register_buffer( + key + "_zero_point", + torch.tensor(weight_qparams["zero_point"], dtype=torch.int, device=device)) + if weight_qscheme == torch.per_channel_affine: + self.register_buffer( + key + "_axis", + torch.tensor(weight_qparams["axis"], dtype=torch.int, device=device)) + else: + # added for TorchScriptability, not used + self.register_buffer( + key + "_axis", torch.tensor(0, dtype=torch.int, device=device)) + setattr(self, key + "_axis_int", getattr(self, key + "_axis").item()) + +class LSTM(RNNBase): + """ Reference Quantized LSTM Module + We'll store weight_qparams for all the weights in _flat_weights; we need to pass in + a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0, + to the weight_qparams for that weight + """ + def __init__(self, *args, **kwargs): + super().__init__('LSTM', *args, **kwargs) + + # Same as above, see torch/nn/modules/module.py::_forward_unimplemented + def permute_hidden(self, # type: ignore[override] + hx: Tuple[Tensor, Tensor], + permutation: Optional[Tensor] + ) -> Tuple[Tensor, Tensor]: + if permutation is None: + return hx + return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation) + + def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]: + if batch_sizes is not None: + mini_batch = int(batch_sizes[0]) + else: + mini_batch = input.size(0) if self.batch_first else input.size(1) + num_directions = 2 if self.bidirectional else 1 + expected_hidden_size = (self.num_layers * num_directions, + mini_batch, self.hidden_size) + return expected_hidden_size + + # In the future, we should prevent mypy from applying contravariance rules here.
+ # See torch/nn/modules/module.py::_forward_unimplemented + def check_forward_args(self, # type: ignore[override] + input: Tensor, + hidden: Tuple[Tensor, Tensor], + batch_sizes: Optional[Tensor], + ): + self.check_input(input, batch_sizes) + self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes), + 'Expected hidden[0] size {}, got {}') + self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes), + 'Expected hidden[1] size {}, got {}') + + def get_quantized_weight_bias_dict(self): + """ dictionary from flat_weight_name to quantized weight or (unquantized) bias + e.g. + { + "weight_ih_l0": quantized_weight, + "bias_ih_l0": unquantized_bias, + ... + } + """ + quantized_weight_bias_dict = {} + for wn in self._flat_weights_names: + if hasattr(self, wn): + if wn.startswith("weight"): + weight_or_bias = get_quantized_weight(self, wn) + else: + weight_or_bias = getattr(self, wn) + else: + weight_or_bias = None + quantized_weight_bias_dict[wn] = weight_or_bias + return quantized_weight_bias_dict + + def get_flat_weights(self): + flat_weights = [] + for wn in self._flat_weights_names: + if hasattr(self, wn): + weight = getattr(self, wn) + if wn.startswith("weight"): + params = _get_weight_and_quantization_params(self, wn) + weight = _quantize_and_dequantize_weight(*params) + else: + weight = None + flat_weights.append(weight) + return flat_weights + + def forward(self, input, hx=None): # noqa: F811 + orig_input = input + # xxx: isinstance check needs to be in conditional for TorchScript to compile + batch_sizes = None + if isinstance(orig_input, PackedSequence): + input, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + else: + batch_sizes = None + is_batched = input.dim() == 3 + batch_dim = 0 if self.batch_first else 1 + if not is_batched: + input = input.unsqueeze(batch_dim) + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + if hx is None: + num_directions = 2 if self.bidirectional else 1 + real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size + h_zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, real_hidden_size, + dtype=input.dtype, device=input.device) + c_zeros = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + hx = (h_zeros, c_zeros) + else: + if batch_sizes is None: # If not PackedSequence input. + if is_batched: # type: ignore[possibly-undefined] + if (hx[0].dim() != 3 or hx[1].dim() != 3): + msg = ("For batched 3-D input, hx and cx should " + f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors") + raise RuntimeError(msg) + else: + if hx[0].dim() != 2 or hx[1].dim() != 2: + msg = ("For unbatched 2-D input, hx and cx should " + f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors") + raise RuntimeError(msg) + hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1)) + + # Each batch of the hidden state should match the input sequence that + # the user believes they are passing in.
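+ # (permute_hidden reorders the hidden/cell states to match a sorted PackedSequence batch; it is a no-op when sorted_indices is None.)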
+ hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + if batch_sizes is None: + result = _VF.lstm(input, hx, self.get_flat_weights(), self.bias, self.num_layers, + self.dropout, self.training, self.bidirectional, self.batch_first) + else: + result = _VF.lstm(input, batch_sizes, hx, self.get_flat_weights(), self.bias, + self.num_layers, self.dropout, self.training, self.bidirectional) + output = result[0] + hidden = result[1:] + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices) + return output_packed, self.permute_hidden(hidden, unsorted_indices) + else: + if not is_batched: # type: ignore[possibly-undefined] + output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] + hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1)) + return output, self.permute_hidden(hidden, unsorted_indices) + + def _get_name(self): + return "QuantizedLSTM(Reference)" + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.num_layers, + mod.bias, + mod.batch_first, + mod.dropout, + mod.bidirectional, + weight_qparams_dict=weight_qparams_dict) + for wn in mod._flat_weights_names: + setattr(ref_mod, wn, getattr(mod, wn)) + return ref_mod + +class GRU(RNNBase): + """ Reference Quantized GRU Module + We'll store weight_qparams for all the weights in _flat_weights; we need to pass in + a `weight_qparams_dict` that maps from weight name, e.g. weight_ih_l0, + to the weight_qparams for that weight + """ + def __init__(self, *args, **kwargs): + if 'proj_size' in kwargs: + raise ValueError("proj_size argument is only supported for LSTM, not RNN or GRU") + super().__init__('GRU', *args, **kwargs) + + def get_quantized_weight_bias_dict(self): + """ dictionary from flat_weight_name to quantized weight or (unquantized) bias + e.g. + { + "weight_ih_l0": quantized_weight, + "bias_ih_l0": unquantized_bias, + ... + } + """ + quantized_weight_bias_dict = {} + for wn in self._flat_weights_names: + if hasattr(self, wn): + if wn.startswith("weight"): + weight_or_bias = get_quantized_weight(self, wn) + else: + weight_or_bias = getattr(self, wn) + else: + weight_or_bias = None + quantized_weight_bias_dict[wn] = weight_or_bias + return quantized_weight_bias_dict + + def get_flat_weights(self): + flat_weights = [] + for wn in self._flat_weights_names: + if hasattr(self, wn): + weight = getattr(self, wn) + if wn.startswith("weight"): + params = _get_weight_and_quantization_params(self, wn) + weight = _quantize_and_dequantize_weight(*params) + else: + weight = None + flat_weights.append(weight) + return flat_weights + + def forward(self, input, hx=None): # noqa: F811 + # Note: this is copied from the forward of GRU in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py + # only changed self._flat_weights to self.get_flat_weights() + # TODO: maybe we can try inheriting from that class and define get_flat_weights + # as a @property?
This might interfere with TorchScript; if we remove that + # requirement in the future we should be able to do this + orig_input = input + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + input, batch_sizes, sorted_indices, unsorted_indices = input + max_batch_size = int(batch_sizes[0]) + else: + batch_sizes = None + assert (input.dim() in (2, 3)), f"GRU: Expected input to be 2-D or 3-D but received {input.dim()}-D tensor" + is_batched = input.dim() == 3 + batch_dim = 0 if self.batch_first else 1 + if not is_batched: + input = input.unsqueeze(batch_dim) + if hx is not None: + if hx.dim() != 2: + raise RuntimeError( + f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor") + hx = hx.unsqueeze(1) + else: + if hx is not None and hx.dim() != 3: + raise RuntimeError( + f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor") + max_batch_size = input.size(0) if self.batch_first else input.size(1) + sorted_indices = None + unsorted_indices = None + + if hx is None: + num_directions = 2 if self.bidirectional else 1 + hx = torch.zeros(self.num_layers * num_directions, + max_batch_size, self.hidden_size, + dtype=input.dtype, device=input.device) + else: + # Each batch of the hidden state should match the input sequence that + # the user believes they are passing in. + hx = self.permute_hidden(hx, sorted_indices) + + self.check_forward_args(input, hx, batch_sizes) + if batch_sizes is None: + result = _VF.gru(input, hx, self.get_flat_weights(), self.bias, self.num_layers, + self.dropout, self.training, self.bidirectional, self.batch_first) + else: + result = _VF.gru(input, batch_sizes, hx, self.get_flat_weights(), self.bias, + self.num_layers, self.dropout, self.training, self.bidirectional) + output = result[0] + hidden = result[1] + + # xxx: isinstance check needs to be in conditional for TorchScript to compile + if isinstance(orig_input, PackedSequence): + output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices) + return output_packed, self.permute_hidden(hidden, unsorted_indices) + else: + if not is_batched: # type: ignore[possibly-undefined] + output = output.squeeze(batch_dim) # type: ignore[possibly-undefined] + hidden = hidden.squeeze(1) + + return output, self.permute_hidden(hidden, unsorted_indices) + + def _get_name(self): + return "QuantizedGRU(Reference)" + + @classmethod + def from_float(cls, mod, weight_qparams_dict): + ref_mod = cls( + mod.input_size, + mod.hidden_size, + mod.num_layers, + mod.bias, + mod.batch_first, + mod.dropout, + mod.bidirectional, + weight_qparams_dict=weight_qparams_dict) + for wn in mod._flat_weights_names: + setattr(ref_mod, wn, getattr(mod, wn)) + return ref_mod diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/sparse.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..8db3f14b08ce4a95dd0f32aa58b3fe54859fefcf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/quantized/reference/modules/sparse.py @@ -0,0 +1,95 @@ +# mypy: allow-untyped-defs +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from .utils import ReferenceQuantizedModule +from typing import Optional, Dict, Any + +__all__ = ['Embedding', 'EmbeddingBag'] + +class Embedding(nn.Embedding, ReferenceQuantizedModule): + """ A reference quantized Embedding module
that fits into the + FX Graph Mode Quantization workflow: activations will be floating point Tensors, + and we will store the floating point weight in the module as well, but in forward we'll + quantize and dequantize the weight before running the floating point functional + embedding operator. + """ + def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False, + sparse: bool = False, _weight: Optional[Tensor] = None, + device=None, dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None) -> None: + super().__init__(num_embeddings, embedding_dim, padding_idx, max_norm, + norm_type, scale_grad_by_freq, sparse, _weight, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def _get_name(self): + return "QuantizedEmbedding(Reference)" + + def forward(self, input: Tensor) -> Tensor: + weight_quant_dequant = self.get_weight() + return F.embedding( + input, weight_quant_dequant, self.padding_idx, self.max_norm, + self.norm_type, self.scale_grad_by_freq, self.sparse) + + @classmethod + def from_float(cls, mod, weight_qparams): + return cls( + mod.num_embeddings, + mod.embedding_dim, + mod.padding_idx, + mod.max_norm, + mod.norm_type, + mod.scale_grad_by_freq, + mod.sparse, + mod.weight, + mod.weight.device, + mod.weight.dtype, + weight_qparams) + +class EmbeddingBag(nn.EmbeddingBag, ReferenceQuantizedModule): + """ A reference quantized EmbeddingBag module that fits into the + FX Graph Mode Quantization workflow: activations will be floating point Tensors, + and we will store the floating point weight in the module as well, but in forward we'll + quantize and dequantize the weight before running the floating point functional + embedding operator.
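+ + An illustrative `weight_qparams` (the exact entries come from the observed qconfig; + this one simply mirrors the per-tensor default used by the reference RNN modules above): + {"qscheme": torch.per_tensor_affine, "dtype": torch.quint8, "scale": 1.0, "zero_point": 0}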
+ """ + def __init__(self, num_embeddings: int, embedding_dim: int, + max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False, + mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None, + include_last_offset: bool = False, padding_idx: Optional[int] = None, + device=None, dtype=None, + weight_qparams: Optional[Dict[str, Any]] = None) -> None: + super().__init__(num_embeddings, embedding_dim, max_norm, norm_type, + scale_grad_by_freq, mode, sparse, _weight, include_last_offset, + padding_idx, device, dtype) + self._init_weight_qparams(weight_qparams, device) + + def _get_name(self): + return "QuantizedEmbedding(Reference)" + + def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor: + weight_quant_dequant = self.get_weight() + return F.embedding_bag(input, weight_quant_dequant, offsets, + self.max_norm, self.norm_type, + self.scale_grad_by_freq, self.mode, self.sparse, + per_sample_weights, self.include_last_offset, + self.padding_idx) + + @classmethod + def from_float(cls, mod, weight_qparams, use_precomputed_fake_quant=False): + return cls( + mod.num_embeddings, + mod.embedding_dim, + mod.max_norm, + mod.norm_type, + mod.scale_grad_by_freq, + mod.mode, + mod.sparse, + mod.weight, + mod.include_last_offset, + mod.padding_idx, + mod.weight.device, + mod.weight.dtype, + weight_qparams + ) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0fda5a58f2984ee05b0d167297b458f62c37fc59 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__init__.py @@ -0,0 +1 @@ +from . 
import quantized diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63553d865e607bdadba2d698456427ca04166974 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..86596ba18cf1f08e979a9e4c0ae0485627c44845 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__init__.py @@ -0,0 +1,10 @@ +from torch.ao.nn.sparse.quantized import dynamic + +from .linear import Linear +from .linear import LinearPackedParams + +__all__ = [ + "dynamic", + "Linear", + "LinearPackedParams", +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7709a0848d0950f63256ac63cec948afc4fd415 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..139fc76ca637bdded85b4687f8473e972bf3f5df Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/linear.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bd15b71c6c12eaef9dee938d95bbba57ef99dff Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83a394f4df276171e7e5b2a1eb0cee843f9d4e99 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__init__.py @@ -0,0 +1,5 @@ +from .linear import Linear + +__all__ = [ + "Linear", +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dc47c9500fd609d00f4b013cf37bc4ead093515 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0baa265283640b80e69eba6d5fed30b3ec9f45de Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/dynamic/__pycache__/linear.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..26388e2e2c7b5a48e9bec213de7b974dc07fd55d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/linear.py @@ -0,0 +1,197 @@ +# mypy: allow-untyped-defs +from typing import Optional + +import torch +from torch.ao.nn.quantized.modules.utils import _quantize_weight, _hide_packed_params_repr + +__all__ = ['LinearPackedParams', 'Linear'] + +# TODO (zaf): Inherit from `quantized.LinearPackedParams` (T83294430) +class LinearPackedParams(torch.nn.Module): + _version = 1 + + def __init__(self, row_block_size=1, col_block_size=4, dtype=torch.qint8): + super().__init__() + + if dtype != torch.qint8: + raise NotImplementedError("Linear prepacking only supports QINT8") + self.dtype = dtype + wq = torch._empty_affine_quantized([1, 1], scale=1.0, zero_point=0, dtype=torch.qint8) + self.set_weight_bias(wq, None, row_block_size, col_block_size) + + def _get_name(self): + return "SparseQuantizedLinearPackedParams" + + @torch.jit.export + def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor], + row_block_size: Optional[int], col_block_size: Optional[int]) -> None: + assert row_block_size is not None and col_block_size is not None + self._packed_params = torch.ops.sparse.qlinear_prepack(weight, bias, row_block_size, col_block_size) + + @torch.jit.export + def _weight_bias(self): + (weight, bias, block_sizes) = torch.ops.sparse.qlinear_unpack(self._packed_params) + return (weight, bias, block_sizes[0], block_sizes[1]) + + def forward(self, x): + return x + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'dtype'] = self.dtype + destination[prefix + '_packed_params'] = self._weight_bias() + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + assert version <= self._version + + self.dtype = state_dict.pop(prefix + 'dtype') + weight, bias, row_block_size, col_block_size = state_dict.pop(prefix + '_packed_params') + self.set_weight_bias(weight, bias, row_block_size, col_block_size) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + @torch.jit.export + def __getstate__(self): + return self._packed_params, self.training, self.dtype + + @torch.jit.export + def __setstate__(self, state): + (self._packed_params, self.training, self.dtype) = state + + def __repr__(self): + return self._weight_bias().__repr__() + +# TODO (zaf): Inherit from `quantized.Linear` (T83294430) +class Linear(torch.nn.Module): + r""" + A quantized sparse linear module with quantized tensors as inputs and outputs.
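+ + A rough construction sketch (sizes here are illustrative; per `utils.py` further + below, only the (1, 4) and (8, 1) row/column block patterns are valid): + sqlinear = Linear(in_features=128, out_features=64, row_block_size=1, col_block_size=4)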
+ """ + _version = 1 + _FLOAT_MODULE = torch.nn.Linear + + def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8): + super().__init__() + + if dtype != torch.qint8: + raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear") + + self.in_features = in_features + self.out_features = out_features + + if bias: + bias = torch.zeros(self.out_features, dtype=torch.float) + else: + bias = None + + qweight = torch._empty_affine_quantized([out_features, in_features], + scale=1, zero_point=0, dtype=torch.qint8) + self._packed_params = LinearPackedParams(row_block_size=row_block_size, + col_block_size=col_block_size, + dtype=dtype) + self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size) + self.scale = 1.0 + self.zero_point = 0 + + @classmethod + def _get_name(cls): + return 'SparseQuantizedLinear' + + def extra_repr(self): + return (f'in_features={self.in_features}, out_features={self.out_features}, scale={self.scale}, ' + f'zero_point={self.zero_point}, qscheme={self.weight().qscheme()}') + + def __repr__(self): + return _hide_packed_params_repr(self, LinearPackedParams) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.sparse.qlinear(x, self._packed_params._packed_params, self.scale, self.zero_point) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'scale'] = torch.tensor(self.scale) + destination[prefix + 'zero_point'] = torch.tensor(self.zero_point) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + self.scale = float(state_dict[prefix + 'scale']) + state_dict.pop(prefix + 'scale') + + self.zero_point = int(state_dict[prefix + 'zero_point']) + state_dict.pop(prefix + 'zero_point') + + op_type = int(state_dict[prefix + 'op_type']) + state_dict.pop(prefix + 'op_type') + + version = local_metadata.get('version', None) + assert version <= self._version + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, False, + missing_keys, unexpected_keys, error_msgs) + + def _weight_bias(self): + return self._packed_params._weight_bias() + + def weight(self): + return self._weight_bias()[0] + + def bias(self): + return self._weight_bias()[1] + + def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor], + row_block_size: Optional[int], col_block_size: Optional[int]) -> None: + assert row_block_size is not None and col_block_size is not None + self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size) + + @classmethod + def from_float(cls, mod, use_precomputed_fake_quant=False): + r"""Create a quantized sparse module from a float module. + + We only care about the convert at this stage, no need for observers just yet. + + TODO(zaf): Need to add the sparse params to the qconfig + """ + assert type(mod) == cls._FLOAT_MODULE, cls._get_name() + \ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__ + assert hasattr(mod, 'sparse_params'), \ + ('Expecting the Linear to have `sparse_params`. 
Make sure you have provided arguments ' + 'in the `sparsifier.squash_mask(params_to_save=("sparse_block_shape",))` method.') + sparse_block_shape = mod.sparse_params.get('sparse_block_shape', None) # type: ignore[operator, union-attr] + assert isinstance(sparse_block_shape, (tuple, list)) + assert len(sparse_block_shape) == 2 + # TODO: Need to add options to qconfig to avoid the calibration. + # TODO: Add calibration for the sparsity + assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' + activation_post_process = mod.activation_post_process + weight_post_process = mod.qconfig.weight() # type: ignore[operator, union-attr] + + # Assumption is that the weight is already sparsified by the + # `sparsifier.convert` + weight = mod.weight + + weight_post_process(weight) + dtype = weight_post_process.dtype + act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[operator, union-attr] + assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8' + w_sc, w_zp = weight_post_process.calculate_qparams() + if isinstance(w_zp, torch.Tensor): + assert not torch.any(w_zp.bool()), "All weight zero points must map to 0" + else: + assert w_zp == 0, 'Weight zero point must map to 0' + qweight = _quantize_weight(weight.float(), weight_post_process) + + row_block_size = mod.sparse_params['sparse_block_shape'][0] # type: ignore[index] + col_block_size = mod.sparse_params['sparse_block_shape'][1] # type: ignore[index] + qlinear = cls(mod.in_features, + mod.out_features, + row_block_size, + col_block_size, + dtype=dtype) + qlinear.set_weight_bias(qweight, mod.bias, + row_block_size, col_block_size) # type: ignore[arg-type] + qlinear.scale = float(act_scale) + qlinear.zero_point = int(act_zp) + return qlinear diff --git a/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..46b1cb1e5b717389769bbd4e8f1b84c611aa890e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/nn/sparse/quantized/utils.py @@ -0,0 +1,43 @@ +# mypy: allow-untyped-defs +import threading + +__all__ = [ + "LinearBlockSparsePattern" +] + +def _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size): + return (row_block_size == 1 and col_block_size == 4) or \ + (row_block_size == 8 and col_block_size == 1) + +# This is a stop-gap measure as the current flow does not allow a module- +# specific block sparse pattern. +# In fact there is no way to convey the sparse pattern via the module config +# of the quantization flow. Thus we use the global context to convey the +# sparsity pattern. +# Once the flow supports it, this should be removed.
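+# A usage sketch (the convert call below is hypothetical): the pattern is installed +# for the duration of a with block and the previous pattern is restored on exit: +# with LinearBlockSparsePattern(8, 1): +#     model = convert(prepared_model)  # prepacking reads LinearBlockSparsePattern.block_size()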
+class LinearBlockSparsePattern: + rlock = threading.RLock() + row_block_size = 1 + col_block_size = 4 + prev_row_block_size = 1 + prev_col_block_size = 4 + + def __init__(self, row_block_size=1, col_block_size=4): + assert _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size) + LinearBlockSparsePattern.rlock.acquire() + LinearBlockSparsePattern.prev_row_block_size = LinearBlockSparsePattern.row_block_size + LinearBlockSparsePattern.prev_col_block_size = LinearBlockSparsePattern.col_block_size + LinearBlockSparsePattern.row_block_size = row_block_size + LinearBlockSparsePattern.col_block_size = col_block_size + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, backtrace): + LinearBlockSparsePattern.row_block_size = LinearBlockSparsePattern.prev_row_block_size + LinearBlockSparsePattern.col_block_size = LinearBlockSparsePattern.prev_col_block_size + LinearBlockSparsePattern.rlock.release() + + @staticmethod + def block_size(): + return LinearBlockSparsePattern.row_block_size, LinearBlockSparsePattern.col_block_size diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/v2_compat.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/v2_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd588a87d55c04f8d3d089bd72ac759d44491925 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/v2_compat.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/compat.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..c2deafd94336d08ccb150bd2e96e92dd26baa6fd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/compat.py @@ -0,0 +1,168 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for API compatibility between TensorFlow release versions. + +See [Version +Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) +""" + +import datetime +import os + +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util.tf_export import tf_export + + +# This value changes every day with an automatic CL. It can be modified in code +# via `forward_compatibility_horizon()` or with the environment variable +# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. 
+_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2024, 2, 8) +_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" +_FORWARD_COMPATIBILITY_DATE_NUMBER = None + + +def _date_to_date_number(year, month, day): + return (year << 9) | (month << 5) | day + + +def _update_forward_compatibility_date_number(date_to_override=None): + """Update the base date to compare in forward_compatible function.""" + + global _FORWARD_COMPATIBILITY_DATE_NUMBER + + if date_to_override: + date = date_to_override + else: + date = _FORWARD_COMPATIBILITY_HORIZON + delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME) + if delta_days: + date += datetime.timedelta(days=int(delta_days)) + + if date < _FORWARD_COMPATIBILITY_HORIZON: + logging.warning("Trying to set the forward compatibility date to the past" + " date %s. This will be ignored by TensorFlow." % (date)) + return + _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number( + date.year, date.month, date.day) + + +_update_forward_compatibility_date_number() + + +@tf_export("compat.forward_compatible") +def forward_compatible(year, month, day): + """Return True if the forward compatibility window has expired. + + See [Version + compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility). + + Forward-compatibility refers to scenarios where the producer of a TensorFlow + model (a GraphDef or SavedModel) is compiled against a version of the + TensorFlow library newer than what the consumer was compiled against. The + "producer" is typically a Python program that constructs and trains a model + while the "consumer" is typically another program that loads and serves the + model. + + TensorFlow has been supporting a 3-week forward-compatibility window for + programs compiled from source at HEAD. + + For example, consider the case where a new operation `MyNewAwesomeAdd` is + created with the intent of replacing the implementation of an existing Python + wrapper - `tf.add`. The Python wrapper implementation should change from + something like: + + ```python + def add(inputs, name=None): + return gen_math_ops.add(inputs, name) + ``` + + to: + + ```python + from tensorflow.python.compat import compat + + def add(inputs, name=None): + if compat.forward_compatible(year, month, day): + # Can use the awesome new implementation. + return gen_math_ops.my_new_awesome_add(inputs, name) + # To maintain forward compatibility, use the old implementation. + return gen_math_ops.add(inputs, name) + ``` + + Where `year`, `month`, and `day` specify the date beyond which binaries + that consume a model are expected to have been updated to include the + new operations. This date is typically at least 3 weeks beyond the date + the code that adds the new operation is committed. + + Args: + year: A year (e.g., 2018). Must be an `int`. + month: A month (1 <= month <= 12) in year. Must be an `int`. + day: A day (1 <= day <= 31, depending on the month) in month. Must be an + `int`. + + Returns: + True if the caller can expect that serialized TensorFlow graphs produced + can be consumed by programs that are compiled with the TensorFlow library + source code after (year, month, day). + """ + return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number( + year, month, day) + + +@tf_export("compat.forward_compatibility_horizon") +@tf_contextlib.contextmanager +def forward_compatibility_horizon(year, month, day): + """Context manager for testing forward compatibility of generated graphs.
+ + See [Version + compatibility](https://www.tensorflow.org/guide/versions#backward_and_partial_forward_compatibility). + + To ensure forward compatibility of generated graphs (see `forward_compatible`) + with older binaries, new features can be gated with: + + ```python + if compat.forward_compatible(year=2018, month=8, day=1): + generate_graph_with_new_features() + else: + generate_graph_so_older_binaries_can_consume_it() + ``` + + However, when adding new features, one may want to unit-test them before + the forward compatibility window expires. This context manager enables + such tests. For example: + + ```python + from tensorflow.python.compat import compat + + def testMyNewFeature(self): + with compat.forward_compatibility_horizon(2018, 8, 2): + # Test that generate_graph_with_new_features() has an effect + ``` + + Args: + year: A year (e.g., 2018). Must be an `int`. + month: A month (1 <= month <= 12) in year. Must be an `int`. + day: A day (1 <= day <= 31, depending on the month) in month. Must be an + `int`. + + Yields: + Nothing. + """ + try: + _update_forward_compatibility_date_number(datetime.date(year, month, day)) + yield + finally: + _update_forward_compatibility_date_number() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/v2_compat.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/v2_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..5820e477eb2e5fc504f49aea4743da4a20003332 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/v2_compat.py @@ -0,0 +1,105 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Switching v2 features on and off.""" + +from tensorflow.python import tf2 +from tensorflow.python.eager import monitoring +from tensorflow.python.framework import ops +from tensorflow.python.framework import registry +from tensorflow.python.framework import tensor +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import control_flow_v2_toggles +from tensorflow.python.ops import resource_variables_toggle +from tensorflow.python.util.tf_export import tf_export + +# Metrics to track the status of v2_behavior +_v2_behavior_usage_gauge = monitoring.BoolGauge( + "/tensorflow/version/v2_behavior", + "whether v2_behavior is enabled or disabled", "status") + +_DATA_V2_CALLBACKS = registry.Registry("data_v2_callbacks") + + +def register_data_v2_callback(data_v2_func): + _DATA_V2_CALLBACKS.register(data_v2_func, data_v2_func.__module__) + + +@tf_export(v1=["enable_v2_behavior"]) +def enable_v2_behavior(): + """Enables TensorFlow 2.x behaviors. + + This function can be called at the beginning of the program (before `Tensors`, + `Graphs` or other structures have been created, and before devices have been + initialized).
It switches all global behaviors that are different between + TensorFlow 1.x and 2.x to behave as intended for 2.x. + + This function is called in the main TensorFlow `__init__.py` file; users should + not need to call it except during complex migrations. + + @compatibility(TF2) + This function is not necessary if you are using TF2. V2 behavior is enabled by + default. + @end_compatibility + """ + _v2_behavior_usage_gauge.get_cell("enable").set(True) + # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or + # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case, + # the modules below independently check if tf2.enabled(). + tf2.enable() + ops.enable_eager_execution() + tensor_shape.enable_v2_tensorshape() # Also switched by tf2 + resource_variables_toggle.enable_resource_variables() + tensor.enable_tensor_equality() + # Enables TensorArrayV2 and control flow V2. + control_flow_v2_toggles.enable_control_flow_v2() + # Make sure internal uses of tf.data symbols map to V2 versions. + for v2_enabler_name in _DATA_V2_CALLBACKS.list(): + v2_enabler = _DATA_V2_CALLBACKS.lookup(v2_enabler_name) + v2_enabler() + + +@tf_export(v1=["disable_v2_behavior"]) +def disable_v2_behavior(): + """Disables TensorFlow 2.x behaviors. + + This function can be called at the beginning of the program (before `Tensors`, + `Graphs` or other structures have been created, and before devices have been + initialized). It switches all global behaviors that are different between + TensorFlow 1.x and 2.x to behave as intended for 1.x. + + Users can call this function to disable 2.x behavior during complex migrations. + + @compatibility(TF2) + Using this function indicates that your software is not compatible + with eager execution and `tf.function` in TF2. + + To migrate to TF2, rewrite your code to be compatible with eager execution. + Please refer to the [migration guide] + (https://www.tensorflow.org/guide/migrate) for additional resources on the + topic. + @end_compatibility + """ + _v2_behavior_usage_gauge.get_cell("disable").set(True) + tf2.disable() + ops.disable_eager_execution() + tensor_shape.disable_v2_tensorshape() # Also switched by tf2 + resource_variables_toggle.disable_resource_variables() + tensor.disable_tensor_equality() + # Disables TensorArrayV2 and control flow V2. + control_flow_v2_toggles.disable_control_flow_v2() + # Make sure internal uses of tf.data symbols map to V1 versions.
+ for v2_disabler_name in _DATA_V2_CALLBACKS.list(): + v2_disabler = _DATA_V2_CALLBACKS.lookup(v2_disabler_name) + v2_disabler() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ffcbc329e2a7b4e52da92f1b988f8a7e930b79c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7028a84debd5dae284a233ac6e90f34bfee59d3d3e44c99e5c815dddba3570a6 +size 165566 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32fcfdd5362cad7a178b397ac0b9bff9f3c1722d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5cd8748a635588f80c81fcbe6aadfe2f17408fa9645fb41bcaccceb1323dea7 +size 226477 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9dec7d7cb488a9db28168a66ac1edd4632c76cb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8c7702ccaa9786051acd8fbbfd340f4943a1149a6354ea94db0ccf4a08bcc67 +size 121696 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12af1effdbf2f9360d76f5f593a56ee12a759644 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9b3b97d58f7cfcf6bef8a630ca69d4d5990742cc634d89c07ae0817a1a7d09b +size 101175 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..824ae24859ca9787e0f4c19a0c308e40eb4c5e22 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder_impl.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/fingerprinting.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/fingerprinting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2bb796811df1522ab3f140b66aed06bc609eef6 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/fingerprinting.cpython-310.pyc differ 
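Taken together, `compat.py` and `v2_compat.py` above give programs explicit control over the TF1/TF2 behavior switch. A minimal sketch of the intended call pattern, assuming a standard TensorFlow install where these toggles are exposed under `tf.compat.v1`:

```python
# Sketch: opt a legacy program out of TF2 behaviors before any graphs are built.
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, control flow v1, no tensor equality

g = tf.Graph()
with g.as_default():
    total = tf.constant(1) + tf.constant(2)
with tf.Session(graph=g) as sess:
    print(sess.run(total))  # -> 3
```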
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/function_serialization.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/function_serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a192e9fd3820a88767293c3b84a055c8c39eb81d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/function_serialization.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/load_options.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/load_options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30ccfee87b0e695de15568687790a0c14b98a9bb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/load_options.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..240f077a30b04e50a05191ca62fec495c794a0f7 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/method_name_updater.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/method_name_updater.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1924e7f98ccf8274d8d7db098f5adb5912aaf31e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/method_name_updater.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/nested_structure_coder.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/nested_structure_coder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5092043a65a745b1486dc64ad2bb820c18455561 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/nested_structure_coder.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/path_helpers.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/path_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5490e2d30ddc0956a46ef637dc0790a8c96a0c7a Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/path_helpers.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_constants.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73262aa7e3b059b190571a1fd7d0624d28b5c0e9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_constants.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..369e0dfa6eee30082bd29d18d495c697211c6ba5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils_impl.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/__init__.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..94e7a3f354791972bf246b28ce87d7b7d526c62d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/__init__.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def Save(arg0: str) -> None: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/constants.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/constants.pyi new file mode 100644 index 0000000000000000000000000000000000000000..461a232097108a6bacb0b188415748c7df226b69 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/constants.pyi @@ -0,0 +1,33 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +ASSETS_DIRECTORY: str +ASSETS_KEY: str +DEBUG_DIRECTORY: str +DEBUG_INFO_FILENAME_PB: str +EXTRA_ASSETS_DIRECTORY: str +FINGERPRINT_FILENAME: str +INIT_OP_SIGNATURE_KEY: str +LEGACY_INIT_OP_KEY: str +MAIN_OP_KEY: str +SAVED_MODEL_FILENAME_CPB: str +SAVED_MODEL_FILENAME_PB: str +SAVED_MODEL_FILENAME_PBTXT: str +SAVED_MODEL_FILENAME_PREFIX: str +SAVED_MODEL_SCHEMA_VERSION: int +TRAIN_OP_KEY: str +TRAIN_OP_SIGNATURE_KEY: str +VARIABLES_DIRECTORY: str +VARIABLES_FILENAME: str diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/fingerprinting.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/fingerprinting.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f1cbab2a737bd257914f50cef5d9df44f103a9d2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/fingerprinting.pyi @@ -0,0 +1,24 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class FileNotFoundException(Exception): ... + +class FingerprintException(Exception): ... + +def CreateFingerprintDef(export_dir: str) -> bytes: ... +def ReadSavedModelFingerprint(export_dir: str) -> bytes: ... +def Singleprint(graph_def_program_hash: int, signature_def_hash: int, saved_object_graph_hash: int, checkpoint_hash: int) -> str: ... +def SingleprintFromFP(export_dir: str) -> str: ... +def SingleprintFromSM(export_dir: str) -> str: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/merger.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/merger.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4023ce61ee5cb1257e11a3412653dc90de7ccbab --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/merger.pyi @@ -0,0 +1,20 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class MergerException(Exception): ... + +def MergerRead(*args, **kwargs) -> Any: ... 
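The `.pyi` stubs above only declare signatures for the pybind11-backed `pywrap_saved_model` extension. A minimal sketch of exercising the fingerprinting entry points declared earlier, with a hypothetical export directory:

```python
# Sketch: reading a SavedModel fingerprint through the declared stubs.
from tensorflow.python.saved_model.pywrap_saved_model import fingerprinting

export_dir = "/tmp/my_saved_model"  # hypothetical SavedModel location
try:
    fp = fingerprinting.ReadSavedModelFingerprint(export_dir)  # returns bytes
    print(f"fingerprint: {len(fp)} bytes")
except fingerprinting.FileNotFoundException:
    print("no fingerprint file found in the export directory")
```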
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/metrics.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/metrics.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6228fca0cebb9765b07cea3736b8eec77404d0b6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/pywrap_saved_model/metrics.pyi @@ -0,0 +1,62 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +kFingerprintError: str +kFingerprintFound: str +kFingerprintNotFound: str + +class MetricException(Exception): ... + +def AddAsyncCheckpointWriteDuration(*args, **kwargs) -> Any: ... +def AddCheckpointReadDuration(*args, **kwargs) -> Any: ... +def AddCheckpointWriteDuration(*args, **kwargs) -> Any: ... +def AddNumCheckpointShardsWritten(*args, **kwargs) -> Any: ... +def AddShardingCallbackDuration(*args, **kwargs) -> Any: ... +def AddTrainingTimeSaved(*args, **kwargs) -> Any: ... +def CalculateFileSize(arg0: str) -> int: ... +def GetAsyncCheckpointWriteDurations(*args, **kwargs) -> Any: ... +def GetCheckpointReadDurations(*args, **kwargs) -> Any: ... +def GetCheckpointSize(*args, **kwargs) -> Any: ... +def GetCheckpointWriteDurations(*args, **kwargs) -> Any: ... +def GetFoundFingerprintOnLoad() -> str: ... +def GetNumCheckpointShardsWritten() -> int: ... +def GetRead(*args, **kwargs) -> Any: ... +def GetReadApi(arg0: str) -> int: ... +def GetReadFingerprint() -> str: ... +def GetReadPath() -> str: ... +def GetReadPathAndSingleprint() -> tuple[str,str]: ... +def GetShardingCallbackDescription() -> str: ... +def GetShardingCallbackDuration() -> int: ... +def GetTrainingTimeSaved(*args, **kwargs) -> Any: ... +def GetWrite(*args, **kwargs) -> Any: ... +def GetWriteApi(arg0: str) -> int: ... +def GetWriteFingerprint() -> str: ... +def GetWritePath() -> str: ... +def GetWritePathAndSingleprint() -> tuple[str,str]: ... +def IncrementRead(*args, **kwargs) -> Any: ... +def IncrementReadApi(arg0: str) -> None: ... +def IncrementWrite(*args, **kwargs) -> Any: ... +def IncrementWriteApi(arg0: str) -> None: ... +def RecordCheckpointSize(*args, **kwargs) -> Any: ... +def SetFoundFingerprintOnLoad(*args, **kwargs) -> Any: ... +def SetReadFingerprint(*args, **kwargs) -> Any: ... +def SetReadPath(*args, **kwargs) -> Any: ... +def SetReadPathAndSingleprint(*args, **kwargs) -> Any: ... +def SetShardingCallbackDescription(*args, **kwargs) -> Any: ... +def SetWriteFingerprint(*args, **kwargs) -> Any: ... +def SetWritePath(*args, **kwargs) -> Any: ... +def SetWritePathAndSingleprint(*args, **kwargs) -> Any: ... 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/registration.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56eaa11651ceb76378f2889206326d79b8e35061 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/registration.cpython-310.pyc differ
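Note (appended for illustration, not part of the diff above): the .pyi files added here only declare the binding surface of TensorFlow's pywrap_saved_model extension. A minimal sketch of how that surface can be exercised, assuming a TensorFlow install that ships the compiled extension; module paths and signatures are taken verbatim from the stubs, and "/tmp/my_saved_model" is a hypothetical directory used only for illustration:

    # Sketch only: constants.pyi describes the on-disk layout of a SavedModel
    # directory; fingerprinting.pyi declares fingerprint readers and errors.
    from tensorflow.python.saved_model.pywrap_saved_model import (
        constants,
        fingerprinting,
    )

    # Module-level constants declared in constants.pyi.
    print(constants.SAVED_MODEL_FILENAME_PB)     # saved_model protobuf filename
    print(constants.VARIABLES_DIRECTORY)         # variables subdirectory name
    print(constants.SAVED_MODEL_SCHEMA_VERSION)  # int, per the stub

    # ReadSavedModelFingerprint returns bytes per the stub signature;
    # FileNotFoundException is the stub-declared error class.
    # "/tmp/my_saved_model" is a hypothetical export directory.
    try:
        fp = fingerprinting.ReadSavedModelFingerprint("/tmp/my_saved_model")
        print(f"fingerprint: {len(fp)} bytes")
    except fingerprinting.FileNotFoundException:
        print("no fingerprint recorded for this SavedModel")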