diff --git a/.gitattributes b/.gitattributes index a8cdb09be3de44317b994da4e5837ed4e8670534..aecea43b9d2d54a62563ce47892b9aff4293582a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -832,3 +832,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_proto_compa videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_pywrap_python_op_gen.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_library_pybind.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/a/aaa-30-s b/llava_next/share/terminfo/a/aaa-30-s new file mode 100644 index 0000000000000000000000000000000000000000..0a11df5c28513f47df258b9904ff61d28f96ad2c Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-30-s differ diff --git a/llava_next/share/terminfo/a/aaa-rv b/llava_next/share/terminfo/a/aaa-rv new file mode 100644 index 0000000000000000000000000000000000000000..505f50f9c6ecc281233df14b4034b75901004acb Binary files /dev/null and b/llava_next/share/terminfo/a/aaa-rv differ diff --git a/llava_next/share/terminfo/a/aixterm b/llava_next/share/terminfo/a/aixterm new file mode 100644 index 0000000000000000000000000000000000000000..1211a1d789182764ceaaa33284f6b4de637dcc5d Binary files /dev/null and b/llava_next/share/terminfo/a/aixterm differ diff --git a/llava_next/share/terminfo/a/altos3 b/llava_next/share/terminfo/a/altos3 new file mode 100644 index 0000000000000000000000000000000000000000..1589774e513a871dcc45cdd2020b523a2f37a7c3 Binary files /dev/null and b/llava_next/share/terminfo/a/altos3 differ diff --git a/llava_next/share/terminfo/a/ampex175-b b/llava_next/share/terminfo/a/ampex175-b new file mode 100644 index 0000000000000000000000000000000000000000..d9488187166a52ed7718c4154bfa39a695af6831 Binary files /dev/null and b/llava_next/share/terminfo/a/ampex175-b differ diff --git a/llava_next/share/terminfo/a/ampex232 b/llava_next/share/terminfo/a/ampex232 new file mode 100644 index 0000000000000000000000000000000000000000..698c33d51674d2d50a8e815ffa36b08ece95646a Binary files /dev/null and b/llava_next/share/terminfo/a/ampex232 differ diff --git a/llava_next/share/terminfo/a/ampex80 b/llava_next/share/terminfo/a/ampex80 new file mode 100644 index 0000000000000000000000000000000000000000..1265d5eddbf8c0390c3e954a7e2d7f4d57d86da8 Binary files /dev/null and b/llava_next/share/terminfo/a/ampex80 differ diff --git a/llava_next/share/terminfo/a/ansi+sgrbold b/llava_next/share/terminfo/a/ansi+sgrbold new file mode 100644 index 0000000000000000000000000000000000000000..0a10f6a658b07e79965af3a8c75e4635b97482fb Binary files /dev/null and b/llava_next/share/terminfo/a/ansi+sgrbold differ diff --git a/llava_next/share/terminfo/a/ansi-generic b/llava_next/share/terminfo/a/ansi-generic new file mode 100644 index 0000000000000000000000000000000000000000..1b72658c62e9f6fcfd200c26d6f57e65ed5716b0 Binary 
files /dev/null and b/llava_next/share/terminfo/a/ansi-generic differ diff --git a/llava_next/share/terminfo/a/ansi-nt b/llava_next/share/terminfo/a/ansi-nt new file mode 100644 index 0000000000000000000000000000000000000000..b6bc8b8f576d8726fc0ec20ecccc67988fe960a6 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi-nt differ diff --git a/llava_next/share/terminfo/a/ansi.sysk b/llava_next/share/terminfo/a/ansi.sysk new file mode 100644 index 0000000000000000000000000000000000000000..55c1aa7b9d3e1f65c644bbaf4a99f765fc991456 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi.sysk differ diff --git a/llava_next/share/terminfo/a/ansi80x50 b/llava_next/share/terminfo/a/ansi80x50 new file mode 100644 index 0000000000000000000000000000000000000000..60c9bd0f233a4d972ca2c9d5fdce164f475e9423 Binary files /dev/null and b/llava_next/share/terminfo/a/ansi80x50 differ diff --git a/llava_next/share/terminfo/a/apple-ae b/llava_next/share/terminfo/a/apple-ae new file mode 100644 index 0000000000000000000000000000000000000000..a5a2335317a995c4b4f676116f25c0c7cf2486e8 Binary files /dev/null and b/llava_next/share/terminfo/a/apple-ae differ diff --git a/llava_next/share/terminfo/a/att4415-w-rv b/llava_next/share/terminfo/a/att4415-w-rv new file mode 100644 index 0000000000000000000000000000000000000000..dde6ba9dbaa732ad429d60e78639b741e2f6b6c6 Binary files /dev/null and b/llava_next/share/terminfo/a/att4415-w-rv differ diff --git a/llava_next/share/terminfo/a/att5420 b/llava_next/share/terminfo/a/att5420 new file mode 100644 index 0000000000000000000000000000000000000000..c1ff8e5aa105f1c8aab1b4a3faf0ada6fc321883 Binary files /dev/null and b/llava_next/share/terminfo/a/att5420 differ diff --git a/llava_next/share/terminfo/a/att605-w b/llava_next/share/terminfo/a/att605-w new file mode 100644 index 0000000000000000000000000000000000000000..5e37491f78dc558c9ed8d7bfcc443278a8d16612 Binary files /dev/null and b/llava_next/share/terminfo/a/att605-w differ diff --git a/llava_next/share/terminfo/a/att610+cvis b/llava_next/share/terminfo/a/att610+cvis new file mode 100644 index 0000000000000000000000000000000000000000..5f94bfb59d8eecaaa1a08b74c49d99ab347fccd8 Binary files /dev/null and b/llava_next/share/terminfo/a/att610+cvis differ diff --git a/llava_next/share/terminfo/a/att620 b/llava_next/share/terminfo/a/att620 new file mode 100644 index 0000000000000000000000000000000000000000..9e7cdc8106fd7b68095158f25560a6b053cecc2d Binary files /dev/null and b/llava_next/share/terminfo/a/att620 differ diff --git a/llava_next/share/terminfo/a/avatar0+ b/llava_next/share/terminfo/a/avatar0+ new file mode 100644 index 0000000000000000000000000000000000000000..664cd0de5856aa896acd822e270d0dfbe765c7b8 Binary files /dev/null and b/llava_next/share/terminfo/a/avatar0+ differ diff --git a/parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7969364a317a85353298ab9503b7cb35554af04e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:365bdc10ffac948c351faa9b05e8f157310ec4a1b7dc19edb5401d4a83f5c00e +size 766040 diff --git a/parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..744830f6d18c815407a202c9b7bf35c257a8c5cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f03d6cd6f3f2b3ded71c109ee11b184f6a94d3414410f392ed06075119f2ad03 +size 224766 diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f0b4e5352e40898511f1dcc4921e2e1209c065 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/_mappings.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/activation_sparsifier/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b59578742bc70672c1f3224d8a92bc66db1a5ab Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/base_data_sparsifier.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e14dae02f96b893cf8f24d94cc22db86bd4cdb4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/data_norm_sparsifier.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa3e518a16e9848e9c444a85450d67a5ac087f40 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/__pycache__/quantization_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3d9b549dabe1a8b84436a3332b98e633857cda1d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/__pycache__/data_sparsity.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9886373fd202582f1cb8b35e99a99e99b35b266c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/weight_norm_sparsifier.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f77969b321491180743b075db81b9509913d57b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__init__.py @@ -0,0 +1,190 @@ +# mypy: allow-untyped-defs +# flake8: noqa: F403 + +from .fake_quantize import * # noqa: F403 +from .fuse_modules import fuse_modules # noqa: F403 +from .fuse_modules import fuse_modules_qat # noqa: F403 +from .fuser_method_mappings import * # noqa: F403 +from .observer import * # noqa: F403 +from .qconfig import * # noqa: F403 +from .qconfig_mapping import * # noqa: F403 +from .quant_type import * # noqa: F403 +from .quantization_mappings import * # type: ignore[no-redef] +from .quantize import * # noqa: F403 +from .quantize_jit import * # noqa: F403 +from .stubs import * # noqa: F403 +from .pt2e.export_utils import _move_exported_model_to_eval as move_exported_model_to_eval +from .pt2e.export_utils import _move_exported_model_to_train as move_exported_model_to_train +from .pt2e.export_utils import _allow_exported_model_train_eval as allow_exported_model_train_eval +from .pt2e.generate_numeric_debug_handle import generate_numeric_debug_handle # noqa: F401 +from typing import Union, List, Callable, Tuple, Optional +from torch import Tensor +import torch + +ObserverOrFakeQuantize = Union[ObserverBase, FakeQuantizeBase] +ObserverOrFakeQuantize.__module__ = "torch.ao.quantization" + +__all__ = [ + "DeQuantStub", + "FakeQuantize", + "FakeQuantizeBase", + "FixedQParamsFakeQuantize", + "FixedQParamsObserver", + "FusedMovingAvgObsFakeQuantize", + "HistogramObserver", + "MatchAllNode", + "MinMaxObserver", + "MovingAverageMinMaxObserver", + "MovingAveragePerChannelMinMaxObserver", + "NoopObserver", + "ObserverBase", + "ObserverOrFakeQuantize", + "Pattern", + "PerChannelMinMaxObserver", + "PlaceholderObserver", + "QConfig", + "QConfigAny", + "QConfigDynamic", + "QConfigMapping", + "QuantStub", + "QuantType", + "QuantWrapper", + "RecordingObserver", + "ReuseInputObserver", + "UniformQuantizationObserverBase", + "add_quant_dequant", + "convert", + "convert_dynamic_jit", + "convert_jit", + "default_affine_fixed_qparams_fake_quant", + "default_affine_fixed_qparams_observer", + "default_debug_observer", + "default_dynamic_fake_quant", + "default_dynamic_quant_observer", + "default_embedding_fake_quant", + "default_embedding_fake_quant_4bit", + "default_eval_fn", + "default_fake_quant", + "default_fixed_qparams_range_0to1_fake_quant", + "default_fixed_qparams_range_0to1_observer", + "default_fixed_qparams_range_neg1to1_fake_quant", + "default_fixed_qparams_range_neg1to1_observer", + 
"default_float_qparams_observer", + "default_float_qparams_observer_4bit", + "default_fused_act_fake_quant", + "default_fused_per_channel_wt_fake_quant", + "default_fused_wt_fake_quant", + "default_histogram_fake_quant", + "default_histogram_observer", + "default_observer", + "default_per_channel_weight_fake_quant", + "default_per_channel_weight_observer", + "default_placeholder_observer", + "default_reuse_input_observer", + "default_symmetric_fixed_qparams_fake_quant", + "default_symmetric_fixed_qparams_observer", + "default_weight_fake_quant", + "default_weight_observer", + "disable_fake_quant", + "disable_observer", + "enable_fake_quant", + "enable_observer", + "fuse_conv_bn", + "fuse_conv_bn_jit", + "fuse_conv_bn_relu", + "fuse_convtranspose_bn", + "fuse_linear_bn", + "fuse_modules", + "fuse_modules_qat", + "fused_per_channel_wt_fake_quant_range_neg_127_to_127", + "fused_wt_fake_quant_range_neg_127_to_127", + "get_combined_dict", + "get_default_compare_output_module_list", + "get_default_custom_config_dict", + "get_default_dynamic_quant_module_mappings", + "get_default_dynamic_sparse_quant_module_mappings", + "get_default_float_to_quantized_operator_mappings", + "get_default_qat_module_mappings", + "get_default_qat_qconfig", + "get_default_qat_qconfig_dict", + "get_default_qat_qconfig_mapping", + "get_default_qconfig", + "get_default_qconfig_dict", + "get_default_qconfig_mapping", + "get_default_qconfig_propagation_list", + "get_default_static_quant_module_mappings", + "get_default_static_quant_reference_module_mappings", + "get_default_static_sparse_quant_module_mappings", + "get_dynamic_quant_module_class", + "get_embedding_qat_module_mappings", + "get_embedding_static_quant_module_mappings", + "get_fuser_method", + "get_fuser_method_new", + "get_observer_state_dict", + "get_quantized_operator", + "get_static_quant_module_class", + "load_observer_state_dict", + "move_exported_model_to_eval", + "move_exported_model_to_train", + "allow_exported_model_train_eval", + "no_observer_set", + "per_channel_weight_observer_range_neg_127_to_127", + "prepare", + "prepare_dynamic_jit", + "prepare_jit", + "prepare_qat", + "propagate_qconfig_", + "qconfig_equals", + "quantize", + "quantize_dynamic", + "quantize_dynamic_jit", + "quantize_jit", + "quantize_qat", + "script_qconfig", + "script_qconfig_dict", + "swap_module", + "weight_observer_range_neg_127_to_127", + "generate_numeric_debug_handle", +] + +def default_eval_fn(model, calib_data): + r"""Define the default evaluation function. 
+ + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for data, target in calib_data: + model(data) + +class _DerivedObserverOrFakeQuantize(ObserverBase): + r"""This observer is used to describe an observer whose quantization parameters + are derived from other observers + """ + + def __init__( + self, + dtype: torch.dtype, + obs_or_fqs: List[ObserverOrFakeQuantize], + derive_qparams_fn: Callable[[List[ObserverOrFakeQuantize]], Tuple[Tensor, Tensor]], + quant_min: Optional[int]=None, + quant_max: Optional[int]=None, + qscheme: Optional[torch.qscheme]=None, + ch_axis: Optional[int] = None + ): + super().__init__(dtype) + self.obs_or_fqs = obs_or_fqs + self.derive_qparams_fn = derive_qparams_fn + self.quant_min = quant_min + self.quant_max = quant_max + self.qscheme = qscheme + self.ch_axis = ch_axis + + from .utils import is_per_channel + if is_per_channel(self.qscheme): + assert self.ch_axis is not None, "Must provide a valid ch_axis if qscheme is per channel" + + def forward(self, x: Tensor) -> Tensor: + return x + + def calculate_qparams(self): + return self.derive_qparams_fn(self.obs_or_fqs) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d50256878e986ad93b64e74475238fccc51847 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_correct_bias.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ea77ca1b6af4e7c9d776e703db0e6383a155ec2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/_equalize.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5317d01eec51e48cd7de03bfaa5ea9e88b92219b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fake_quantize.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..671efd767ba876ee4b551a09116747a0e3723e7d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/fuse_modules.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..792d4c583aaea29b22c1fad87424b77abfa1e8da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/observer.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48d80cb0c5828ec4ca8650e3fb07a4bb80ab7011 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/qconfig_mapping.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b3bdbf76d1dc019fe9a401fa9fc4bd9fe852aa1 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quant_type.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantization_mappings.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantization_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f568ef8fec7881c5bb3c8f6aafe917f1ca3e548 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantization_mappings.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcd1c197d14839b6431e3f4e3d9a453fde254d9d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_fx.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c896a08fc50993e0e93f060b018ff4b96323bdb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_fx.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_pt2e.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_pt2e.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..362f08e30349074fdd75394e4d2d6ce5580837bb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/quantize_pt2e.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/stubs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cf32cafb86698a053d22b5cae8b4db1623f004f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/__pycache__/stubs.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/_correct_bias.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/_correct_bias.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6b42a4a0dc05a24120faceaad8de3ff4fc1ace --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/ao/quantization/_correct_bias.py @@ -0,0 +1,145 @@ +# mypy: allow-untyped-defs +import torch +import torch.nn as nn +import torch.ao.nn.quantized as nnq + +import torch.ao.quantization +import torch.ao.ns._numeric_suite as ns + +__all__ = [ + "get_module", + "parent_child_names", + "get_param", + "MeanShadowLogger", + "bias_correction", +] + +_supported_modules = {nn.Linear, nn.Conv2d} +_supported_modules_quantized = {nnq.Linear, nnq.Conv2d} + +def get_module(model, name): + """Given name of submodule, this function grabs the submodule from given model.""" + return dict(model.named_modules())[name] + +def parent_child_names(name): + """Split full name of submodule into parent submodule's full name and submodule's name.""" + split_name = name.rsplit('.', 1) + if len(split_name) == 1: + return '', split_name[0] + else: + return split_name[0], split_name[1] + +def get_param(module, attr): + """Get the parameter given a module and attribute. + + Sometimes the weights/bias attribute gives you the raw tensor, but sometimes + gives a function that will give you the raw tensor, this function takes care of that logic + """ + param = getattr(module, attr, None) + if callable(param): + return param() + else: + return param + +class MeanShadowLogger(ns.Logger): + """Mean Logger for a Shadow module. + + A logger for a Shadow module whose purpose is to record the rolling mean + of the data passed to the floating point and quantized models + """ + + def __init__(self): + """Set up initial values for float and quantized stats, count, float sum, and quant sum.""" + super().__init__() + self.stats["float"] = None + self.stats["quantized"] = None + self.count = 0 + self.float_sum = None + self.quant_sum = None + + def forward(self, x, y): + """Compute the average of quantized and floating-point data from modules. + + The inputs x,y are output data from the quantized and floating-point modules. + x is for the quantized module, y is for the floating point module + """ + if x.is_quantized: + x = x.dequantize() + + self.count += 1 + if self.stats["quantized"] is None: + self.stats["quantized"] = x + self.quant_sum = x + else: + self.quant_sum += x + self.stats["quantized"] = self.quant_sum / self.count + + if self.stats["float"] is None: + self.stats["float"] = y + self.float_sum = y + else: + self.float_sum += y + self.stats["float"] = self.float_sum / self.count + + def clear(self): + self.stats["float"] = None + self.stats["quantized"] = None + self.count = 0 + self.float_sum = None + self.quant_sum = None + +def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None): + """Perform bias correction on a module. + + Using numeric suite shadow module, the expected output of the floating point and quantized modules + is recorded. 
Using that data the bias of supported modules is shifted to compensate for the drift caused + by quantization + Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2) + + Args: + float_model: a trained model that serves as a reference to what bias correction should aim for + quantized_model: quantized form of float_model that bias correction is to applied to + img_data: calibration data to estimate the expected output (used to find quantization error) + target_modules: specifies what submodules in quantized_model need bias correction (can be extended to + unquantized submodules) + neval_batches: a cap to the number of batches you want to be used for estimating the expected output + """ + ns.prepare_model_with_stubs(float_model, quantized_model, _supported_modules, MeanShadowLogger) + + uncorrected_modules = {} + for name, submodule in quantized_model.named_modules(): + if type(submodule) in target_modules: + uncorrected_modules[name] = submodule + + for uncorrected_module in uncorrected_modules: + quantized_submodule = get_module(quantized_model, uncorrected_module) + bias = get_param(quantized_submodule, 'bias') + if bias is not None: + + count = 0 + for data in img_data: + quantized_model(data[0]) + count += 1 + if count == neval_batches: + break + ob_dict = ns.get_logger_dict(quantized_model) + parent_name, _ = parent_child_names(uncorrected_module) + + float_data = ob_dict[parent_name + '.stats']['float'] + quant_data = ob_dict[parent_name + '.stats']['quantized'] + + # math for expected_error + quantization_error = quant_data - float_data + dims = list(range(quantization_error.dim())) + # Note: we don't want to take the mean over the output channel dimension + dims.remove(1) + expected_error = torch.mean(quantization_error, dims) + + updated_bias = bias.data - expected_error + + bias.data = updated_bias + + # Resets the data contained in the loggers + for name, submodule in quantized_model.named_modules(): + if isinstance(submodule, MeanShadowLogger): + submodule.clear() diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/_learnable_fake_quantize.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/_learnable_fake_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..ce23e80de150b582877fed7d5d35f16db65c16e0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/_learnable_fake_quantize.py @@ -0,0 +1,164 @@ +# mypy: allow-untyped-defs +import torch +from torch.nn.parameter import Parameter +from typing import List + +__all__: List[str] = [] + +class _LearnableFakeQuantize(torch.ao.quantization.FakeQuantizeBase): + r"""Generalized extension of the FakeQuantize module in fake_quantize.py. + + This is an extension of the FakeQuantize module in fake_quantize.py, which + supports more generalized lower-bit quantization and supports learning of the scale + and zero point parameters through backpropagation. + + In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize + module also includes the following attributes to support quantization parameter learning. + + * :attr:`channel_len` defines the length of the channel when initializing scale and zero point + for the per channel case. + + * :attr:`use_grad_scaling` defines the flag for whether the gradients for scale and zero point are + normalized by the constant, which is proportional to the square root of the number of + elements in the tensor. 
The related literature justifying the use of this particular constant + can be found here: https://openreview.net/pdf?id=rkgO66VKDS. + + * :attr:`fake_quant_enabled` defines the flag for enabling fake quantization on the output. + + * :attr:`static_enabled` defines the flag for using observer's static estimation for + scale and zero point. + + * :attr:`learning_enabled` defines the flag for enabling backpropagation for scale and zero point. + """ + def __init__(self, observer, quant_min=0, quant_max=255, scale=1., zero_point=0., channel_len=-1, + use_grad_scaling=False, **observer_kwargs): + super().__init__() + assert quant_min < quant_max, 'quant_min must be strictly less than quant_max.' + self.quant_min = quant_min + self.quant_max = quant_max + # also pass quant_min and quant_max to observer + observer_kwargs["quant_min"] = quant_min + observer_kwargs["quant_max"] = quant_max + self.use_grad_scaling = use_grad_scaling + if channel_len == -1: + self.scale = Parameter(torch.tensor([scale])) + self.zero_point = Parameter(torch.tensor([zero_point])) + else: + assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer." + self.scale = Parameter(torch.tensor([scale] * channel_len)) + self.zero_point = Parameter(torch.tensor([zero_point] * channel_len)) + + self.activation_post_process = observer(**observer_kwargs) + assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \ + 'quant_min out of bound' + assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \ + 'quant_max out of bound' + self.dtype = self.activation_post_process.dtype + self.qscheme = self.activation_post_process.qscheme + self.ch_axis = self.activation_post_process.ch_axis \ + if hasattr(self.activation_post_process, 'ch_axis') else -1 + self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8)) + self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8)) + self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8)) + + bitrange = torch.tensor(quant_max - quant_min + 1).double() + self.bitwidth = int(torch.log2(bitrange).item()) + self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps])) + + @torch.jit.export + def enable_param_learning(self): + r"""Enable parameter learning over static observer estimates. + + Enables learning of quantization parameters and + disables static observer estimates. Forward path returns fake quantized X. + """ + self.toggle_qparam_learning(enabled=True) \ + .toggle_fake_quant(enabled=True) \ + .toggle_observer_update(enabled=False) + return self + + @torch.jit.export + def enable_static_estimate(self): + """Enable static estimates of quantization parameters. + + Enables static observer estimates and disables learning of + quantization parameters. Forward path returns fake quantized X. + """ + self.toggle_qparam_learning(enabled=False) \ + .toggle_fake_quant(enabled=True) \ + .toggle_observer_update(enabled=True) + + @torch.jit.export + def enable_static_observation(self): + """Enable accumulation of data without updating quantization parameters. + + Enables static observer accumulating data from input but doesn't + update the quantization parameters. Forward path returns the original X. 
+ """ + self.toggle_qparam_learning(enabled=False) \ + .toggle_fake_quant(enabled=False) \ + .toggle_observer_update(enabled=True) + + @torch.jit.export + def toggle_observer_update(self, enabled=True): + self.static_enabled[0] = int(enabled) # type: ignore[operator] + return self + + @torch.jit.export + def enable_observer(self, enabled=True): + self.toggle_observer_update(enabled) + + @torch.jit.export + def toggle_qparam_learning(self, enabled=True): + self.learning_enabled[0] = int(enabled) # type: ignore[operator] + self.scale.requires_grad = enabled + self.zero_point.requires_grad = enabled + return self + + @torch.jit.export + def toggle_fake_quant(self, enabled=True): + self.fake_quant_enabled[0] = int(enabled) + return self + + @torch.jit.export + def observe_quant_params(self): + print(f'_LearnableFakeQuantize Scale: {self.scale.detach()}') + print(f'_LearnableFakeQuantize Zero Point: {self.zero_point.detach()}') + + @torch.jit.export + def calculate_qparams(self): + self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator] + scale = self.scale.detach() + zero_point = self.zero_point.detach().round().clamp(self.quant_min, self.quant_max).long() + return scale, zero_point + + def forward(self, X): + if self.static_enabled[0] == 1: # type: ignore[index] + self.activation_post_process(X.detach()) + _scale, _zero_point = self.activation_post_process.calculate_qparams() + _scale = _scale.to(self.scale.device) + _zero_point = _zero_point.to(self.zero_point.device) + self.scale.data.copy_(_scale) + self.zero_point.data.copy_(_zero_point) + else: + self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator] + + if self.fake_quant_enabled[0] == 1: + if self.qscheme in (torch.per_channel_symmetric, torch.per_tensor_symmetric): + self.zero_point.data.zero_() + + if self.use_grad_scaling: + grad_factor = 1.0 / (X.numel() * self.quant_max) ** 0.5 + else: + grad_factor = 1.0 + if self.qscheme in ( + torch.per_channel_symmetric, torch.per_channel_affine): + X = torch._fake_quantize_learnable_per_channel_affine( + X, self.scale, self.zero_point, self.ch_axis, + self.quant_min, self.quant_max, grad_factor) + else: + X = torch._fake_quantize_learnable_per_tensor_affine( + X, self.scale, self.zero_point, + self.quant_min, self.quant_max, grad_factor) + + return X diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fake_quantize.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fake_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..b921df39217a699ef525e43ac715b0dd78ffdc22 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fake_quantize.py @@ -0,0 +1,536 @@ +# mypy: allow-untyped-defs +"""Implements modules used to perform fake quantization.""" + +import torch +from torch.nn import Module +from torch.ao.quantization.observer import ( + MovingAverageMinMaxObserver, + HistogramObserver, + MovingAveragePerChannelMinMaxObserver, + FixedQParamsObserver, + default_fixed_qparams_range_0to1_observer, + default_fixed_qparams_range_neg1to1_observer, + _with_args, +) +import re +from abc import ABC, abstractmethod +from typing import Any, Tuple + +__all__ = [ + "FakeQuantizeBase", + "FakeQuantize", + "FixedQParamsFakeQuantize", + "FusedMovingAvgObsFakeQuantize", + "disable_fake_quant", + "disable_observer", + "enable_fake_quant", + "enable_observer", + "default_fake_quant", + "default_weight_fake_quant", + "default_dynamic_fake_quant", + "default_fixed_qparams_range_neg1to1_fake_quant", + 
"default_fixed_qparams_range_0to1_fake_quant", + "default_symmetric_fixed_qparams_fake_quant", + "default_affine_fixed_qparams_fake_quant", + "default_per_channel_weight_fake_quant", + "default_embedding_fake_quant", + "default_embedding_fake_quant_4bit", + "default_histogram_fake_quant", + "default_fused_act_fake_quant", + "default_fused_wt_fake_quant", + "default_fused_per_channel_wt_fake_quant", + "fused_wt_fake_quant_range_neg_127_to_127", + "fused_per_channel_wt_fake_quant_range_neg_127_to_127", +] + +def _is_per_channel(qscheme: 'torch.qscheme') -> bool: + return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine, torch.per_channel_affine_float_qparams] + +def _is_per_tensor(qscheme: 'torch.qscheme') -> bool: + return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine] + +def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool: + return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric] + +def _is_float_qparams(qscheme: 'torch.qscheme') -> bool: + return qscheme in [torch.per_channel_affine_float_qparams, ] + +class FakeQuantizeBase(ABC, Module): + r"""Base fake quantize module. + + Base fake quantize module + Any fake quantize implementation should derive from this class. + + Concrete fake quantize module should follow the same API. In forward, they will update + the statistics of the observed Tensor and fake quantize the input. They should also provide a + `calculate_qparams` function that computes the quantization parameters given + the collected statistics. + + """ + + fake_quant_enabled: torch.Tensor + observer_enabled: torch.Tensor + + def __init__(self): + """Set fake_quant_enabled and observer_enabled.""" + super().__init__() + # fake_quant_enabled and observer_enabled are buffers to support their + # replication in DDP. Data type is uint8 because NCCL does not support + # bool tensors. + self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8)) + self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8)) + + @abstractmethod + def forward(self, x): + pass + + @abstractmethod + def calculate_qparams(self, **kwargs): + pass + + @torch.jit.export + def enable_fake_quant(self, enabled: bool = True) -> None: + self.fake_quant_enabled[0] = 1 if enabled else 0 + + @torch.jit.export + def disable_fake_quant(self): + self.enable_fake_quant(False) + + @torch.jit.export + def enable_observer(self, enabled: bool = True) -> None: + self.observer_enabled[0] = 1 if enabled else 0 + + @torch.jit.export + def disable_observer(self): + self.enable_observer(False) + + @classmethod + def with_args(cls, **kwargs): + fake_quant_constructor = _with_args(cls, **kwargs) + # need to assign the correct module to fake_quantize + # constructors to satisfy public v private requirements + fake_quant_constructor.__module__ = "torch.ao.quantization.fake_quantize" + return fake_quant_constructor + +class FakeQuantize(FakeQuantizeBase): + r"""Simulate the quantize and dequantize operations in training time. + + The output of this module is given by:: + + x_out = ( + clamp(round(x/scale + zero_point), quant_min, quant_max) - zero_point + ) * scale + + * :attr:`is_dynamic` indicates whether the fake quantie is a placeholder for dynamic quantization + operators (choose_qparams -> q -> dq) or static quantization operators (q -> dq) + + * :attr:`scale` defines the scale factor used for quantization. 
+ + * :attr:`zero_point` specifies the quantized value to which 0 in floating point maps to + + * :attr:`fake_quant_enabled` controls the application of fake quantization on tensors, note that + statistics can still be updated. + + * :attr:`observer_enabled` controls statistics collection on tensors + + * :attr:`dtype` specifies the quantized dtype that is being emulated with fake-quantization, + allowable values are torch.qint8 and torch.quint8. + + Args: + + observer (module): Module for observing statistics on input tensors and calculating scale + and zero-point. + observer_kwargs (optional): Arguments for the observer module + + Attributes: + activation_post_process (Module): User provided module that collects statistics on the input tensor and + provides a method to calculate scale and zero-point. + + """ + + scale: torch.Tensor + zero_point: torch.Tensor + + def __init__(self, observer=MovingAverageMinMaxObserver, quant_min=None, quant_max=None, is_dynamic=False, **observer_kwargs): + super().__init__() + # Populate quant_min/quant_max to observer_kwargs if valid + if quant_min is not None and quant_max is not None: + assert quant_min <= quant_max, \ + 'quant_min must be less than or equal to quant_max' + dtype = observer_kwargs.get("dtype", torch.quint8) + if hasattr(observer, "p"): + # In case observer is _PartialWrapper, dtype can be stored in + # observer.p.keywords["dtype"] + dtype = getattr(getattr(observer, "p", {}), "keywords", {}).get( + "dtype", dtype + ) + assert torch.iinfo(dtype).min <= quant_min, 'quant_min out of bound' + assert quant_max <= torch.iinfo(dtype).max, 'quant_max out of bound' + observer_kwargs.update({"quant_min": quant_min, "quant_max": quant_max}) + observer_kwargs["is_dynamic"] = is_dynamic + self.activation_post_process = observer(**observer_kwargs) + # TODO: keeping self.quant_min/max for BC; remove after a couple releases + # Users should use self.activation_post_process.quant_min + self.quant_min = self.activation_post_process.quant_min + self.quant_max = self.activation_post_process.quant_max + self.is_dynamic = self.activation_post_process.is_dynamic + if _is_float_qparams(self.activation_post_process.qscheme): + zero_point_dtype = torch.float + else: + zero_point_dtype = torch.int + self.register_buffer('scale', torch.tensor([1.0], dtype=torch.float)) + self.register_buffer('zero_point', torch.tensor([0], dtype=zero_point_dtype)) + self.dtype = self.activation_post_process.dtype + self.qscheme = self.activation_post_process.qscheme + self.ch_axis = self.activation_post_process.ch_axis \ + if hasattr(self.activation_post_process, 'ch_axis') else -1 + assert _is_per_channel(self.qscheme) or \ + _is_per_tensor(self.qscheme), \ + 'Only per channel and per tensor quantization are supported in fake quantize' + \ + ' got qscheme: ' + str(self.qscheme) + self.is_per_channel = _is_per_channel(self.qscheme) + + @torch.jit.export + def calculate_qparams(self): + return self.activation_post_process.calculate_qparams() + + def forward(self, X): + if self.observer_enabled[0] == 1: + self.activation_post_process(X.detach()) + _scale, _zero_point = self.calculate_qparams() + _scale, _zero_point = _scale.to(self.scale.device), _zero_point.to(self.zero_point.device) + if self.scale.shape != _scale.shape: + self.scale.resize_(_scale.shape) + self.zero_point.resize_(_zero_point.shape) + self.scale.copy_(_scale) + self.zero_point.copy_(_zero_point) + + if self.fake_quant_enabled[0] == 1: + if self.is_per_channel: + X = torch.fake_quantize_per_channel_affine( + X, 
self.scale, self.zero_point, + self.ch_axis, self.activation_post_process.quant_min, self.activation_post_process.quant_max) + else: + X = torch.fake_quantize_per_tensor_affine( + X, self.scale, self.zero_point, + self.activation_post_process.quant_min, self.activation_post_process.quant_max) + return X + + @torch.jit.export + def extra_repr(self): + return f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, ' \ + f'quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, ' \ + f'dtype={self.dtype}, qscheme={self.qscheme}, ch_axis={self.ch_axis}, ' \ + f'scale={self.scale}, zero_point={self.zero_point}' + + def _save_to_state_dict(self, destination, prefix, keep_vars): + # We cannot currently register scalar values as buffers, so need to manually + # specify serialization here. + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + 'scale'] = self.scale + destination[prefix + 'zero_point'] = self.zero_point + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + # Removing this function throws an error that the size of the loaded tensor does not match the original size + # i.e., These buffers start out with numel 0 and become numel 1 once they have their first forward pass. + local_state = ['scale', 'zero_point'] + for name in local_state: + key = prefix + name + if key in state_dict: + val = state_dict[key] + # Custom handling to allow loading scale and zero_point + # of size N into uninitialized buffers of size 0. The + # buffers are resized here, and the values are copied in + # the default state_dict loading code of the parent. + if name == 'scale': + self.scale.resize_(val.shape) + else: + assert name == 'zero_point' + self.zero_point.resize_(val.shape) + # For torchscript module we need to update the attributes here since we do not + # call the `_load_from_state_dict` function defined module.py + if torch.jit.is_scripting(): + if name == 'scale': + self.scale.copy_(val) + else: + assert name == 'zero_point' + self.zero_point.copy_(val) + elif strict: + missing_keys.append(key) + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + +class FixedQParamsFakeQuantize(FakeQuantize): + """Simulate quantize and dequantize in training time. + + Simulate quantize and dequantize with fixed quantization + parameters in training time. Only per tensor quantization + is supported. 
+ """ + + # TODO: rename observer to observer_ctr + def __init__(self, observer): + super().__init__(observer=observer) + assert type(self.activation_post_process) == FixedQParamsObserver, \ + f"{self.__class__.__name__}'s observer must be a {FixedQParamsObserver.__name__}" + self._observer_ctr = observer + self.scale = self.activation_post_process.scale + self.zero_point = self.activation_post_process.zero_point + assert _is_per_tensor(self.qscheme), 'Only per tensor quantization is supported' + \ + ' FixedQParamsFakeQuantize module, got qscheme:' + str(self.qscheme) + + @torch.jit.export + def calculate_qparams(self): + return self.scale, self.zero_point + + @torch.jit.export + def extra_repr(self): + """Define a string representation of the object's attributes.""" + return f'fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, ' \ + f'scale={self.scale}, zero_point={self.zero_point}, ' \ + f'dtype={self.dtype}, quant_min={self.activation_post_process.quant_min}, ' \ + f'quant_max={self.activation_post_process.quant_max}, qscheme={self.qscheme}' + + +class FusedMovingAvgObsFakeQuantize(FakeQuantize): + r"""Define a fused module to observe the tensor. + + Fused module that is used to observe the input tensor (compute min/max), compute + scale/zero_point and fake_quantize the tensor. + This module uses calculation similar MovingAverageMinMaxObserver for the inputs, + to compute the min/max values in order to compute the scale/zero_point. + The qscheme input in the observer is used to differentiate between symmetric/affine + quantization scheme. + + The output of this module is given by + x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale + + Similar to :class:`~torch.ao.quantization.FakeQuantize`, and accepts the same attributes as the + base class. 
+ + """ + + def __init__( + self, + observer: Any = MovingAverageMinMaxObserver, + quant_min: int = 0, + quant_max: int = 255, + **observer_kwargs: Any + ) -> None: + super().__init__(observer, quant_min, quant_max, **observer_kwargs) + assert isinstance(self.activation_post_process, (MovingAverageMinMaxObserver, MovingAveragePerChannelMinMaxObserver)), \ + "Fused observer+fake_quant module only works with MovingAverageMinMaxObserver" + self.register_buffer("fake_quant_enabled", torch.tensor([1], dtype=torch.long)) + self.register_buffer("observer_enabled", torch.tensor([1], dtype=torch.long)) + self.is_symmetric_quant = _is_symmetric_quant(self.activation_post_process.qscheme) + + @torch.jit.export + def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]: + return self.activation_post_process.calculate_qparams() + + @torch.jit.export + def extra_repr(self) -> str: + return ( + f"fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, " + f"scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}, " + f"quant_min={self.activation_post_process.quant_min}, quant_max={self.activation_post_process.quant_max}, " + f"qscheme={self.qscheme}, reduce_range={self.activation_post_process.reduce_range}" + ) + + def forward(self, X: torch.Tensor) -> torch.Tensor: + return torch.fused_moving_avg_obs_fake_quant( + X, + self.observer_enabled, + self.fake_quant_enabled, + self.activation_post_process.min_val, + self.activation_post_process.max_val, + self.scale, + self.zero_point, + self.activation_post_process.averaging_constant, + self.activation_post_process.quant_min, + self.activation_post_process.quant_max, + self.ch_axis, + self.is_per_channel, + self.is_symmetric_quant, + ) + +default_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, + dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True) +""" +Default fake_quant for activations. +""" + +default_weight_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127, + dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False) +""" +Default fake_quant for weights. +Observer is memoryless since averaging_constant is 1. +""" + +default_dynamic_fake_quant = FakeQuantize.with_args( + observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, is_dynamic=True, + dtype=torch.quint8, averaging_constant=1) +""" +Default dynamic fake_quant for activations. +""" + +default_fixed_qparams_range_neg1to1_fake_quant = ( + FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_neg1to1_observer) +) +default_fixed_qparams_range_0to1_fake_quant = ( + FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_0to1_observer) +) +# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases +default_symmetric_fixed_qparams_fake_quant = default_fixed_qparams_range_neg1to1_fake_quant +default_affine_fixed_qparams_fake_quant = default_fixed_qparams_range_0to1_fake_quant + +default_per_channel_weight_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver, + quant_min=-128, + quant_max=127, + dtype=torch.qint8, + qscheme=torch.per_channel_symmetric, + reduce_range=False, + ch_axis=0) +""" +Default fake_quant for per-channel weights. +Observer is memoryless since averaging_constant is 1. 
+""" +default_embedding_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver, + qscheme=torch.per_channel_affine_float_qparams, + dtype=torch.quint8, + quant_min=0, + quant_max=255, + ch_axis=0, + averaging_constant=1) +""" +Default fake_quant for embeddings. +Observer is memoryless since averaging_constant is 1. +""" + +default_embedding_fake_quant_4bit = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver, + qscheme=torch.per_channel_affine_float_qparams, + ch_axis=0, + dtype=torch.quint4x2, + averaging_constant=1) + +default_histogram_fake_quant = FakeQuantize.with_args(observer=HistogramObserver, + quant_min=0, + quant_max=255, + dtype=torch.quint8, + qscheme=torch.per_tensor_affine, + reduce_range=True) +""" +Fake_quant for activations using a histogram.. +""" + + +default_fused_act_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, + quant_min=0, + quant_max=255, + dtype=torch.quint8,) + +""" +Fused version of `default_fake_quant`, with improved performance. +""" + + +default_fused_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, + quant_min=-128, + quant_max=127, + dtype=torch.qint8, + qscheme=torch.per_tensor_symmetric) +""" +Fused version of `default_weight_fake_quant`, with improved performance. +""" + +default_fused_per_channel_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver, + quant_min=-128, + quant_max=127, + dtype=torch.qint8, + qscheme=torch.per_channel_symmetric) +""" +Fused version of `default_per_channel_weight_fake_quant`, with improved performance. +""" + +fused_wt_fake_quant_range_neg_127_to_127 = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, + quant_min=-127, + quant_max=127, + dtype=torch.qint8, + qscheme=torch.per_tensor_symmetric, + eps=2 ** -12) +""" +Fused version of `default_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128. +""" + +fused_per_channel_wt_fake_quant_range_neg_127_to_127 = \ + FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver, + quant_min=-127, + quant_max=127, + dtype=torch.qint8, + qscheme=torch.per_channel_symmetric, + eps=2 ** -12) + +""" +Fused version of `default_per_channel_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128. +""" + + +def _is_fake_quant_script_module(mod): + """Return true if given mod is an instance of FakeQuantize script module.""" + if isinstance(mod, torch.jit.RecursiveScriptModule): + # qualified name looks like '__torch__.torch.ao.quantization.fake_quantize.___torch_mangle_2.FakeQuantize' + suffix = mod._c.qualified_name.split('.', 1)[1] + name = re.sub(r'\.___torch_mangle_\d+', '', suffix) + return name == 'torch.ao.quantization.fake_quantize.FakeQuantize' or \ + name == 'torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize' + return False + +def disable_fake_quant(mod): + """Disable fake quantization for the module. + + Disable fake quantization for this module, if applicable. Example usage:: + + # model is any PyTorch model + model.apply(torch.ao.quantization.disable_fake_quant) + + """ + if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): + mod.disable_fake_quant() + +def enable_fake_quant(mod): + """Enable fake quantization for the module. + + Enable fake quantization for this module, if applicable. 
Example usage:: + + # model is any PyTorch model + model.apply(torch.ao.quantization.enable_fake_quant) + + """ + if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): + mod.enable_fake_quant() + +def disable_observer(mod): + """Disable observation for this module. + + Disable observation for this module, if applicable. Example usage:: + + # model is any PyTorch model + model.apply(torch.ao.quantization.disable_observer) + + """ + if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): + mod.disable_observer() + +def enable_observer(mod): + """Enable observation for this module. + + Enable observation for this module, if applicable. Example usage:: + + # model is any PyTorch model + model.apply(torch.ao.quantization.enable_observer) + + """ + if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): + mod.enable_observer() diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..a989ae298825f423fe8922cb2234a70f432a2d17 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.py @@ -0,0 +1,260 @@ +# mypy: allow-untyped-defs +import torch.nn as nn +import torch.ao.nn.intrinsic as nni + +from typing import Any, Union, Callable, List, Tuple, Dict, Optional, Type +from torch.ao.quantization.utils import Pattern, get_combined_dict, MatchAllNode +import itertools + +__all__ = [ + "fuse_conv_bn", + "fuse_conv_bn_relu", + "fuse_linear_bn", + "fuse_convtranspose_bn", + "get_fuser_method", + "get_fuser_method_new", +] + +def fuse_conv_bn(is_qat, conv, bn): + r"""Return the fused the conv and bn modules. + Given the conv and bn modules, fuses them and returns the fused module + + Args: + is_qat: a flag for whether we are using quantization aware training fusion + or post training quantization fusion + conv: Module instance of type conv2d/conv3d + bn: Spatial BN instance that needs to be fused with the conv + + Examples:: + + >>> m1 = nn.Conv2d(10, 20, 3) + >>> b1 = nn.BatchNorm2d(20) + >>> # xdoctest: +SKIP + >>> m2 = fuse_conv_bn(m1, b1) + """ + assert conv.training == bn.training, \ + "Conv and BN both must be in the same mode (train or eval)." + + fused_module_class_map = { + nn.Conv1d: nni.ConvBn1d, + nn.Conv2d: nni.ConvBn2d, + nn.Conv3d: nni.ConvBn3d, + } + + if is_qat: + assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d' + assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True' + assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True' + fused_module_class = fused_module_class_map.get((type(conv)), None) + if fused_module_class is not None: + return fused_module_class(conv, bn) + else: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn)}") + else: + return nn.utils.fuse_conv_bn_eval(conv, bn) + +def fuse_conv_bn_relu(is_qat, conv, bn, relu): + r"""Return the fused conv and bv modules. 
+ + Given the conv and bn modules, fuses them and returns the fused module + + Args: + is_qat: a flag for whether we are using quantization aware training fusion + or post training quantization fusion + conv: Module instance of type conv2d/conv3d + bn: Spatial BN instance that needs to be fused with the conv + + Examples:: + + >>> m1 = nn.Conv2d(10, 20, 3) + >>> b1 = nn.BatchNorm2d(20) + >>> r1 = nn.ReLU(inplace=False) + >>> # xdoctest: +SKIP + >>> m2 = fuse_conv_bn_relu(m1, b1, r1) + """ + assert conv.training == bn.training == relu.training, \ + "Conv and BN both must be in the same mode (train or eval)." + fused_module : Optional[Type[nn.Sequential]] = None + if is_qat: + map_to_fused_module_train = { + nn.Conv1d: nni.ConvBnReLU1d, + nn.Conv2d: nni.ConvBnReLU2d, + nn.Conv3d: nni.ConvBnReLU3d, + } + assert bn.num_features == conv.out_channels, 'Output channel of Conv must match num_features of BatchNorm' + assert bn.affine, 'Only support fusing BatchNorm with affine set to True' + assert bn.track_running_stats, 'Only support fusing BatchNorm with tracking_running_stats set to True' + fused_module = map_to_fused_module_train.get(type(conv), None) + if fused_module is not None: + return fused_module(conv, bn, relu) + else: + raise NotImplementedError(f"Cannot fuse train modules: {(conv, bn, relu)}") + else: + map_to_fused_module_eval = { + nn.Conv1d: nni.ConvReLU1d, + nn.Conv2d: nni.ConvReLU2d, + nn.Conv3d: nni.ConvReLU3d, + } + fused_module = map_to_fused_module_eval.get(type(conv), None) + if fused_module is not None: + fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn) + return fused_module(fused_conv, relu) + else: + raise NotImplementedError(f"Cannot fuse eval modules: {(conv, bn, relu)}") + +def fuse_linear_bn(is_qat, linear, bn): + r"""Return the fused linear and bn modules. + Given the linear and bn modules, fuses them and returns the fused module + + Args: + is_qat: a flag for whether we are using quantization aware training fusion + or post training quantization fusion + linear: Module instance of type Linear + bn: BatchNorm1d instance that needs to be fused with the linear layer + + Examples:: + + >>> m1 = nn.Linear(20, 10) + >>> b1 = nn.BatchNorm1d(10) + >>> # xdoctest: +SKIP + >>> m2 = fuse_linear_bn(m1, b1) + """ + assert linear.training == bn.training, \ + "Linear and BN both must be in the same mode (train or eval)." + + if is_qat: + assert bn.num_features == linear.out_features, \ + "Output features of Linear must match num_features of BatchNorm1d" + assert bn.affine, "Only support fusing BatchNorm1d with affine set to True" + assert bn.track_running_stats, \ + "Only support fusing BatchNorm1d with tracking_running_stats set to True" + return nni.LinearBn1d(linear, bn) + else: + return nn.utils.fusion.fuse_linear_bn_eval(linear, bn) + +def fuse_convtranspose_bn(is_qat, convt, bn): + r"""Return the fused ConvTranspose and bn modules. + Given ConvTranspose and bn modules, fuses them and returns the fused module + + Args: + convt: Module instance of type ConvTransposeNd + bn: BatchNormNd instance that needs to be fused with the linear layer. + batch norm N should match the ConvTranspose N + + Examples:: + + >>> m1 = nn.ConvTranspose2d(10, 20, 3) + >>> b1 = nn.BatchNorm2d(20) + >>> # xdoctest: +SKIP + >>> m2 = fuse_convtranspose_bn(m1, b1) + """ + assert convt.training == bn.training, \ + "ConvTranspose and BN both must be in the same mode (train or eval)." 
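+    # Sketch of the eval-path folding (illustrative only): the eval branch below
+    # folds the BatchNorm statistics straight into the transposed convolution,
+    # roughly per output channel k:
+    #     w_k' = w_k * gamma_k / sqrt(running_var_k + eps)
+    #     b_k' = (b_k - running_mean_k) * gamma_k / sqrt(running_var_k + eps) + beta_k
+    # so the returned module carries no separate BatchNorm.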
+ + if is_qat: + raise Exception("Fusing ConvTranspose+BatchNorm not yet supported in QAT.") # noqa: TRY002 + else: + return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True) + +def _sequential_wrapper2(sequential): + """Return a sequential wrapped that for is_qat and two modules. + Given a sequential class for two modules, return a function that takes + is_qat, and then two modules as argument, that ignores the is_qat flag + and always returns the sequential that combines the two input modules + """ + def fuser_method(is_qat, m1, m2): + return sequential(m1, m2) + return fuser_method + +_DEFAULT_OP_LIST_TO_FUSER_METHOD: Dict[Tuple, Union[nn.Sequential, Callable]] = { + (nn.Conv1d, nn.BatchNorm1d): fuse_conv_bn, + (nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu, + (nn.Conv2d, nn.BatchNorm2d): fuse_conv_bn, + (nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu, + (nn.Conv3d, nn.BatchNorm3d): fuse_conv_bn, + (nn.Conv3d, nn.BatchNorm3d, nn.ReLU): fuse_conv_bn_relu, + (nn.Conv1d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU1d), + (nn.Conv2d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU2d), + (nn.Conv3d, nn.ReLU): _sequential_wrapper2(nni.ConvReLU3d), + (nn.Linear, nn.BatchNorm1d): fuse_linear_bn, + (nn.Linear, nn.ReLU): _sequential_wrapper2(nni.LinearReLU), + (nn.BatchNorm2d, nn.ReLU): _sequential_wrapper2(nni.BNReLU2d), + (nn.BatchNorm3d, nn.ReLU): _sequential_wrapper2(nni.BNReLU3d), + (nn.ConvTranspose1d, nn.BatchNorm1d): fuse_convtranspose_bn, + (nn.ConvTranspose2d, nn.BatchNorm2d): fuse_convtranspose_bn, + (nn.ConvTranspose3d, nn.BatchNorm3d): fuse_convtranspose_bn, +} + +def get_fuser_method(op_list, additional_fuser_method_mapping=None): + """Get fuser method for the given list of module types. + + Get fuser method for the given list of module types, + return None if fuser method does not exist + """ + if additional_fuser_method_mapping is None: + additional_fuser_method_mapping = {} + all_mappings = get_combined_dict(_DEFAULT_OP_LIST_TO_FUSER_METHOD, + additional_fuser_method_mapping) + fuser_method = all_mappings.get(op_list, None) + assert fuser_method is not None, f"did not find fuser method for: {op_list} " + return fuser_method + +def _reverse2(f): + def reversed(is_qat, x, y): + return f(is_qat, y, x) + return reversed + +def _reverse3(f): + def reversed(is_qat, x, w): + y, z = w + return f(is_qat, z, y, x) + return reversed + +def _get_valid_patterns(op_pattern): + """Return a list of valid patterns generated from the op_pattern. + + Returns a list of valid patterns generated from the op_pattern, + since MatchAllNode can match all types of nodes, + e.g. 
pattern (torch.nn.Conv2d, torch.add) should also be able to match keys like + (MatchAllNode, torch.add) and (torch.nn.Conv2d, MatchAllNode) + + Example Input: + (torch.add, (torch.nn.ReLU, torch.nn.Conv2d)) + + Example Output: + [(torch.add, (torch.nn.ReLU, torch.nn.Conv2d)), + (torch.add, (torch.nn.ReLU, MatchAllNode)), + (torch.add, (MatchAllNode, torch.nn.Conv2d)), + (torch.add, (MatchAllNode, MatchAllNode)), + (MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d)), + (MatchAllNode, (torch.nn.ReLU, MatchAllNode)), + (MatchAllNode, (MatchAllNode, torch.nn.Conv2d)), + (MatchAllNode, (MatchAllNode, MatchAllNode)), + ] + """ + result: List[Any] + if isinstance(op_pattern, (tuple, list)): + sub_combs = [] + for sub_pattern in op_pattern: + sub_combs.append(_get_valid_patterns(sub_pattern)) + result = list(itertools.product(*sub_combs)) + else: + result = [op_pattern, MatchAllNode] + return result + +def get_fuser_method_new( + op_pattern: Pattern, + fuser_method_mapping: Dict[Pattern, Union[nn.Sequential, Callable]]): + """Get fuser method. + + This will be made default after we deprecate the get_fuser_method + Would like to implement this first and have a separate PR for deprecation + """ + op_patterns = _get_valid_patterns(op_pattern) + fuser_method = None + for op_pattern in op_patterns: + fuser_method = fuser_method_mapping.get(op_pattern, None) + if fuser_method is not None: + break + assert fuser_method is not None, f"did not find fuser method for: {op_pattern} " + return fuser_method diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py new file mode 100644 index 0000000000000000000000000000000000000000..72ce4b2471f537fe0509d11f1fe43e6ef631c697 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_decomposed.py @@ -0,0 +1,1033 @@ +# mypy: allow-untyped-defs +import math +from typing import Optional, Tuple + +import torch +from torch._refs import _unsqueeze_multiple +from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax +from torch.library import impl, Library + +# Note: decomposed means decomposed quantized tensor, using decomposed so that the +# name is not too long +quantized_decomposed_lib = Library("quantized_decomposed", "DEF") + +_INTEGER_DTYPES = [torch.uint8, torch.int8, torch.int16, torch.int32] +_FLOAT_DTYPES = [torch.float8_e5m2, torch.float8_e4m3fn] + +_DTYPE_TO_QVALUE_BOUNDS = {k : (torch.iinfo(k).min, torch.iinfo(k).max) for k in _INTEGER_DTYPES} +_DTYPE_TO_QVALUE_BOUNDS.update({k : (int(torch.finfo(k).min), int(torch.finfo(k).max)) for k in _FLOAT_DTYPES}) + +# Helper to check the passed in quant min and max are valid for the dtype +def _quant_min_max_bounds_check(quant_min, quant_max, dtype): + if dtype not in _DTYPE_TO_QVALUE_BOUNDS: + raise ValueError(f"Unsupported dtype: {dtype}") + quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype] + + assert quant_min >= quant_min_lower_bound, \ + "quant_min out of bound for dtype, " \ + f"quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}" + + assert quant_max <= quant_max_upper_bound, \ + "quant_max out of bound for dtype, " \ + f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}" + +quantized_decomposed_lib.define( + "quantize_per_tensor(Tensor input, float scale, int zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_tensor", 
"CompositeExplicitAutograd") +def quantize_per_tensor( + input: torch.Tensor, + scale: float, + zero_point: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine quantization for the Tensor using the same quantization parameters to map + from floating point to quantized values + + Args: + input (torch.Tensor): original float32 or bfloat16 Tensor + scale (float): quantization parameter for affine quantization + zero_point (int): quantization parameter for affine quantization + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + if input.dtype in [torch.float16, torch.bfloat16]: + input = input.to(torch.float32) + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + + inv_scale = 1.0 / scale + return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype) + +@impl(quantized_decomposed_lib, "quantize_per_tensor", "Meta") +def quantize_per_tensor_meta( + input: torch.Tensor, + scale: float, + zero_point: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + if input.dtype in [torch.float16, torch.bfloat16]: + input = input.to(torch.float32) + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + return torch.empty_like(input, dtype=dtype) + +quantized_decomposed_lib.define( + "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd") +def quantize_per_tensor_tensor( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine quantization for the Tensor using the same quantization parameters to map + from floating point to quantized values + Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype) + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta") +def quantize_per_tensor_tensor_meta( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + if input.dtype in [torch.float16, torch.bfloat16]: + input = input.to(torch.float32) + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + return torch.empty_like(input, dtype=dtype) + +# TODO: remove other 
variants and keep this one +quantized_decomposed_lib.define( + "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, " + "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd") +def quantize_per_tensor_tensor2( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: torch.Tensor, + quant_max: torch.Tensor, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine quantization for the Tensor using the same quantization parameters to map + from floating point to quantized values + Same as `quantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype) + +@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta") +def quantize_per_tensor_tensor2_meta( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: torch.Tensor, + quant_max: torch.Tensor, + dtype: torch.dtype +) -> torch.Tensor: + return quantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype) + +# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in +# the signature as metadata for the input Tensor, this might be useful for pattern +# matching in the future +# We will revisit this later if we found there are no use cases for it +quantized_decomposed_lib.define( + "dequantize_per_tensor(Tensor input, float scale, int zero_point, " + "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd") +def dequantize_per_tensor( + input: torch.Tensor, + scale: float, + zero_point: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + """ Affine dequantization for the Tensor using the same quantization parameters to map + from quantized values to floating point values + + Args: + input (torch.Tensor): Tensor with dtype matching `dtype` argument, + e.g. 
(`torch.uint8`), it is a per tensor quantized Tensor if combined with + quantization parameters in the argument of this function (scale/zero_point) + + scale (float): quantization parameter for affine quantization + + zero_point (int): quantization parameter for affine quantization + + quant_min (int): minimum quantized value for input Tensor (not used in computation, + reserved for pattern matching) + + quant_max (int): maximum quantized value for input Tensor (not used in computation, + reserved for pattern matching) + + dtype (torch.dtype): dtype for input Tensor (not used in computation, + reserved for pattern matching) + + out_dtype (torch.dtype?): optional dtype for output Tensor + + Returns: + dequantized float32 Tensor + """ + assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}, but got {input.dtype}" + if out_dtype is None: + out_dtype = torch.float32 + if dtype in _DTYPE_TO_QVALUE_BOUNDS: + # TODO: investigate why + # (input - zero_point).to(torch.float32) * scale + # failed the test + return (input.to(out_dtype) - zero_point) * scale + else: + raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor", "Meta") +def dequantize_per_tensor_meta( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + if out_dtype is None: + out_dtype = torch.float32 + return torch.empty_like(input, dtype=out_dtype) + +quantized_decomposed_lib.define( + "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, " + "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd") +def dequantize_per_tensor_tensor( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + """ Affine dequantization for the Tensor using the same quantization parameters to map + from quantized values to floating point values + Same as `dequantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype, out_dtype=out_dtype) + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta") +def dequantize_per_tensor_tensor_meta( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + if out_dtype is None: + out_dtype = torch.float32 + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + assert input.dtype == dtype, f"Expecting input to have dtype: {dtype}" + if dtype in _DTYPE_TO_QVALUE_BOUNDS: + return torch.empty_like(input, dtype=out_dtype) + else: + raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}") + +# TODO: remove 
other variants and keep this one +quantized_decomposed_lib.define( + "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, " + "Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd") +def dequantize_per_tensor_tensor2( + input: torch.Tensor, + scale: torch.Tensor, + zero_point: torch.Tensor, + quant_min: torch.Tensor, + quant_max: torch.Tensor, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + """ Affine dequantization for the Tensor using the same quantization parameters to map + from quantized values to floating point values + Same as `dequantize_per_tensor` but scale and zero_point are Scalar Tensor instead of + scalar values + """ + assert zero_point.numel() == 1, f"Expecting zero_point tensor to be one element, but received : {zero_point.numel()}" + assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}" + return dequantize_per_tensor( + input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype, out_dtype=out_dtype) + +@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta") +def dequantize_per_tensor_tensor2_meta( + input, + scale, + zero_point, + quant_min, + quant_max, + dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + return dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype) + +quantized_decomposed_lib.define( + "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, " + "float eps, ScalarType dtype) -> (Tensor, Tensor)") + +@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd") +def choose_qparams_tensor( + input: torch.Tensor, + qmin: int, + qmax: int, + eps: float, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + """ Given an input Tensor, derive the per tensor affine quantization parameter + (scale and zero_point) for target quantized Tensor from the Tensor + + Args: + input (torch.Tensor): floating point input Tensor + quant_min (int): minimum quantized value for target quantized Tensor + quant_max (int): maximum quantized value for target quantized Tensor + dtype (torch.dtype): dtype for target quantized Tensor + + Returns: + scale (float): quantization parameter for the target quantized Tensor + zero_point (int): quantization parameter for the target quantized Tensor + """ + assert input.dtype in [ + torch.float32, + torch.float16, + torch.bfloat16, + ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}" + assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \ + f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}" + validate_qmin_qmax(qmin, qmax) + + min_val, max_val = torch.aminmax(input) + + return determine_qparams( + min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False) + +quantized_decomposed_lib.define( + "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, " + "float eps, ScalarType dtype) -> (Tensor, Tensor)") + +@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd") +def choose_qparams_symmetric_tensor( + input: torch.Tensor, + qmin: int, + qmax: int, + eps: float, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + """ Given an input Tensor, derive the per tensor affine 
quantization parameter + (scale and zero_point) for target quantized Tensor from the Tensor + + Args: + input (torch.Tensor): floating point input Tensor + quant_min (int): minimum quantized value for target quantized Tensor + quant_max (int): maximum quantized value for target quantized Tensor + dtype (torch.dtype): dtype for target quantized Tensor + + Returns: + scale (float): quantization parameter for the target quantized Tensor + zero_point (int): quantization parameter for the target quantized Tensor + """ + assert input.dtype in [ + torch.float32, + torch.float16, + torch.bfloat16, + ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}" + assert dtype in _DTYPE_TO_QVALUE_BOUNDS, \ + f"Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}" + validate_qmin_qmax(qmin, qmax) + + min_val, max_val = torch.aminmax(input) + return determine_qparams( + min_val, + max_val, + qmin, + qmax, + dtype, + torch.Tensor([eps]), + has_customized_qrange=False, + qscheme=torch.per_tensor_symmetric + ) + +@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta") +def choose_qparams_tensor_meta( + input: torch.Tensor, + quant_min: int, + quant_max: int, + eps: float, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + assert input.dtype in [ + torch.float32, + torch.float16, + torch.bfloat16, + ], f"Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}" + assert quant_min < quant_max, f"Expecting quant_min to be smaller than quant_max but received min: \ + {quant_min} max: {quant_max}" + return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device) + +@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta") +def choose_qparams_symmetric_tensor_meta( + input: torch.Tensor, + quant_min: int, + quant_max: int, + eps: float, + dtype: torch.dtype +) -> Tuple[torch.Tensor, torch.Tensor]: + return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device) + +# Helper function used to implement per-channel quantization against any axis +def _permute_to_axis_zero(x, axis): + new_axis_list = list(range(x.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = x.permute(tuple(new_axis_list)) + return y, new_axis_list + +quantized_decomposed_lib.define( + "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor") + +@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd") +def quantize_per_channel( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + """ Affine per channel quantization for the Tensor using the same quantization + parameters for each channel/axis to map from floating point to quantized values + + Args: + input (torch.Tensor): original float32 or bfloat16 Tensor + scales (torch.Tensor): a list of scale quantization parameter for + affine quantization, one per channel + zero_point (torch.Tensor): a list of zero_point quantization parameter for + affine quantization, one per channel + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. 
torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + if input.dtype in [torch.float16, torch.bfloat16]: + input = input.to(torch.float32) + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + input, permute_axis_list = _permute_to_axis_zero(input, axis) + res = torch.zeros_like(input) + + for i in range(input.size(0)): + res[i] = torch.clamp( + torch.round(input[i] * (1.0 / scales[i])) + zero_points[i], + quant_min, + quant_max + ) + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta") +def quantize_per_channel_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype +) -> torch.Tensor: + if input.dtype in [torch.float16, torch.bfloat16]: + input = input.to(torch.float32) + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=dtype) + +# Note: quant_min/quant_max/dtype are not used in the operator, but for now it's kept in +# the signature as metadata for the input Tensor, this might be useful for pattern +# matching in the future +# We will revisit this later if we found there are no use cases for it +quantized_decomposed_lib.define( + "dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, " + "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor") + +@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd") +def dequantize_per_channel( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: Optional[torch.Tensor], + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + """ Affine per channel dequantization for the Tensor using the same quantization + parameters for each channel/axis to map from quantized values to floating point values + + Args: + input (torch.Tensor): Tensor with dtype matching `dtype` argument, + e.g. 
(`torch.uint8`), it is a per channel quantized Tensor if combined with + quantization parameter in the argument of this function (scales/zero_points/axis) + + scales (torch.Tensor): a list of scale quantization parameter for + affine quantization, one per channel + + zero_points (torch.Tensor): a list of zero_point quantization parameter for + affine quantization, one per channel + + quant_min (int): minimum quantized value for output Tensor (not used in computation, + reserved for pattern matching) + + quant_max (int): maximum quantized value for output Tensor (not used in computation, + reserved for pattern matching) + + dtype (torch.dtype): requested dtype for output Tensor (not used in computation, + reserved for pattern matching) + + out_dtype (torch.dtype?): optional dtype for output Tensor + + Returns: + dequantized float32 Tensor + """ + assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}" + if out_dtype is None: + out_dtype = torch.float32 + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + input, permute_axis_list = _permute_to_axis_zero(input, axis) + res = torch.zeros_like(input, dtype=out_dtype) + + for i in range(input.size(0)): + zp = zero_points[i] if zero_points is not None else 0 + # TODO: investigate why + # (input[i] - zero_points[i]).to(out_dtype) * scales[i] + # failed the test + res[i] = (input[i].to(out_dtype) - zp) * scales[i] + + out = res.permute(tuple(permute_axis_list)) + return out + +@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta") +def dequantize_per_channel_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: Optional[torch.Tensor], + axis: int, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + *, + out_dtype: Optional[torch.dtype] = None +) -> torch.Tensor: + assert input.dtype == dtype, f"Expecting input to have dtype {dtype}, but got dtype: {input.dtype}" + if out_dtype is None: + out_dtype = torch.float32 + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=out_dtype) + + +quantized_decomposed_lib.define( + "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)" +) + + +@impl( + quantized_decomposed_lib, + "choose_qparams_per_token", + "CompositeExplicitAutograd", +) +def choose_qparams_per_token( + input: torch.Tensor, + dtype: torch.dtype, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Choose quantization parameters for per token quantization. This means for a N dimension Tensor + (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize + every N elements with the same quantization parameter. The dimension for scales/zero_points + will be (M1 * M2 ... * Mn) + + Args: + input (torch.Tensor): original float32/float16 Tensor + dtype (torch.dtype): dtype (e.g. 
torch.uint8) for input Tensor + + Returns: + scales and zero_points, both float32 Tensors + """ + + scales = input.abs().amax(dim=-1, keepdim=True) + if scales.dtype == torch.float16: + scales = ( + scales.float() + ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range) + if dtype == torch.int8: + n_bits = 8 + quant_max = 2 ** (n_bits - 1) - 1 + else: + raise Exception(f"unsupported dtype in choose_qparams_per_token: {dtype}") # noqa: TRY002 + + scales = scales.clamp(min=1e-5).div(quant_max) + zero_points = torch.zeros_like(scales) + return scales, zero_points + + +@impl( + quantized_decomposed_lib, + "choose_qparams_per_token", + "Meta", +) +def choose_qparams_per_token_meta( + input: torch.Tensor, + dtype: torch.dtype, +) -> Tuple[torch.Tensor, torch.Tensor]: + size = (1, input.size(-1)) + return torch.empty(size, dtype=torch.double, device=input.device), torch.empty( + size, dtype=torch.int64, device=input.device + ) + + +quantized_decomposed_lib.define( + "_choose_qparams_per_token_asymmetric_impl(Tensor input, ScalarType dtype) -> (Tensor, Tensor)" +) + + +@impl( + quantized_decomposed_lib, + "_choose_qparams_per_token_asymmetric_impl", + "CompositeImplicitAutograd", +) +def _choose_qparams_per_token_asymmetric_impl( + input: torch.Tensor, + dtype: torch.dtype, +) -> Tuple[torch.Tensor, torch.Tensor]: + """Choose quantization parameters for per token quantization. This means for a N dimension Tensor + (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize + every N elements with the same quantization parameter. The dimension for scales/zero_points + will be (M1 * M2 ... * Mn) + + Args: + input (torch.Tensor): original float32/float16 Tensor + dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor + + Returns: + scales and zero_points, both float32 Tensors + """ + # Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18 + qmin, qmax = -128, 127 + min_val = torch.amin(input, dim=-1, keepdim=True) + max_val = torch.amax(input, dim=-1, keepdim=True) + min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) + max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + eps = torch.finfo(torch.float32).eps # use xnnpack eps? 
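+    # Sketch of the computation below (illustrative only): the scale spans the
+    # observed range [min(min_val, 0), max(max_val, 0)] over qmax - qmin levels,
+    # and the zero_point is anchored so that one end of that range maps exactly
+    # onto qmin or qmax (per the XNNPACK heuristic referenced above), then
+    # rounded and clamped to [qmin, qmax].  With made-up numbers min_val=-1.0,
+    # max_val=3.0: scale = 4.0 / 255 ~= 0.0157 and zero_point ~= -64.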
+ + # scale + scale = (max_val_pos - min_val_neg) / float(qmax - qmin) + scale = scale.clamp(min=eps) + + # zero point + descaled_min = min_val_neg / scale + descaled_max = max_val_pos / scale + zero_point_from_min_error = qmin + descaled_min + zero_point_from_max_error = qmax + descaled_max + zero_point = torch.where( + zero_point_from_min_error + zero_point_from_max_error > 0, + qmin - descaled_min, + qmax - descaled_max, + ) + zero_point = torch.clamp(zero_point, qmin, qmax).round() + + return scale.to(torch.float32), zero_point.to(torch.float32) + + +quantized_decomposed_lib.define( + "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)" +) + + +@impl( + quantized_decomposed_lib, + "choose_qparams_per_token_asymmetric", + "CompositeExplicitAutograd", +) +def choose_qparams_per_token_asymmetric( + input: torch.Tensor, + dtype: torch.dtype, +) -> Tuple[torch.Tensor, torch.Tensor]: + return _choose_qparams_per_token_asymmetric_impl(input, dtype) + + +@impl( + quantized_decomposed_lib, + "choose_qparams_per_token_asymmetric", + "Meta", +) +def choose_qparams_per_token_asymmetric_meta( + input: torch.Tensor, + dtype: torch.dtype, +) -> Tuple[torch.Tensor, torch.Tensor]: + size = (1, input.size(-1)) + return torch.empty(size, dtype=torch.double, device=input.device), torch.empty( + size, dtype=torch.int64, device=input.device + ) + + +def _per_token_quant_qparam_dim_check(input, scales, zero_points): + num_tokens = math.prod(list(input.size())[:-1]) + assert ( + num_tokens == scales.numel() + ), f"num_tokens: {num_tokens} scales: {scales.size()}" + assert ( + num_tokens == zero_points.numel() + ), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}" + + +quantized_decomposed_lib.define( + "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, " + "int quant_min, int quant_max, ScalarType dtype) -> Tensor" +) + + +@impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd") +def quantize_per_token( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, +): + """Per token quantization for the Tensor using the quantization parameters to map + from floating point to quantized values. This means for a N dimension Tensor + (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize + every N elements with the same quantization parameter. The dimension for scales/zero_points + will be (M1 * M2 ... * Mn) + + Args: + input (torch.Tensor): original float32 or bfloat16 Tensor + scales (float32 torch.Tensor): quantization parameter for per token affine quantization + zero_points (int32 torch.Tensor): quantization parameter for per token affine quantization + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. 
torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + _per_token_quant_qparam_dim_check(input, scales, zero_points) + input = ( + input.mul(1.0 / scales).add(zero_points).round().clamp(quant_min, quant_max).to(dtype) + ) + return input + + +@impl(quantized_decomposed_lib, "quantize_per_token", "Meta") +def quantize_per_token_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, +): + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + return torch.empty_like(input, dtype=dtype) + + +quantized_decomposed_lib.define( + "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, " + "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor" +) + + +@impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd") +def dequantize_per_token( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + output_dtype: torch.dtype = torch.float32, +): + """Per token dequantization for the Tensor using the quantization parameters to map + from floating point to quantized values. This means for a N dimension Tensor + (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize + every N elements with the same quantization parameter. The dimension for scales/zero_points + will be (M1 * M2 ... * Mn) + + Args: + input (torch.Tensor): quantized Tensor (uint8, int8 etc.) + scales (float32 torch.Tensor): quantization parameter for per token affine quantization + zero_points (int32 torch.Tensor): quantization parameter for per token affine quantization + quant_min (int): minimum quantized value for input Tensor + quant_max (int): maximum quantized value for input Tensor + dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor + output_dtype (torch.dtype): dtype (e.g. 
torch.float32) for output Tensor + + Returns: + dequantized Tensor with dtype `output_dtype` + """ + input = input - zero_points + input = input.to(output_dtype) * scales + return input + + +@impl(quantized_decomposed_lib, "dequantize_per_token", "Meta") +def dequantize_per_token_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + output_dtype: torch.dtype = torch.float32, +): + _quant_min_max_bounds_check(quant_min, quant_max, dtype) + # TODO: support fp16 + return torch.empty_like(input, dtype=output_dtype) + + +quantized_decomposed_lib.define( + "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, " + "int quant_max, ScalarType dtype, int group_size) -> Tensor" +) + + +# TODO: dtype is ignored for now +@impl( + quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd" +) +def quantize_per_channel_group( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + group_size=128, +): + assert group_size > 1 + # needed for GPTQ single column quantize + if group_size > input.shape[-1] and scales.shape[-1] == 1: + group_size = input.shape[-1] + + assert input.shape[-1] % group_size == 0 + assert input.dim() == 2 + + # TODO: check for dtype, currently we can't express torch.int4 so it's omitted + to_quant = input.reshape(-1, group_size) + assert torch.isnan(to_quant).sum() == 0 + + scales = scales.reshape(-1, 1) + zero_points = zero_points.reshape(-1, 1) + + input_int8 = ( + to_quant.mul(1.0 / scales) + .add(zero_points) + .round() + .clamp_(quant_min, quant_max) + .to(dtype) + .reshape_as(input) + ) + + return input_int8 + + +@impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta") +def quantize_per_channel_group_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + quant_min: int, + quant_max: int, + dtype: torch.dtype, + group_size=128, +): + """Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters + to map from floating point to quantized values. This means for each row of a 2-d Tensor + (M, N), we calculate scales/zero_points for each `group_size` elements + and quantize every `group_size` elements with the same quantization parameter. + The dimension for scales/zero_points will be (M * ceil(N, group_size),) + + Args: + input (torch.Tensor): original float32 or bfloat16 Tensor + scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization + zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization + quant_min (int): minimum quantized value for output Tensor + quant_max (int): maximum quantized value for output Tensor + dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor + + Returns: + Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters + are not stored in the Tensor, we are storing them in function arguments instead + """ + assert group_size > 1 + # needed for GPTQ single column quantize + if group_size > input.shape[-1] and scales.shape[-1] == 1: + group_size = input.shape[-1] + + assert input.shape[-1] % group_size == 0 + assert input.dim() == 2 + return torch.empty_like(input, dtype=dtype) + + +quantized_decomposed_lib.define( + "dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? 
zero_points, int quant_min, " + "int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor" +) + + +@impl( + quantized_decomposed_lib, + "dequantize_per_channel_group", + "CompositeExplicitAutograd", +) +def dequantize_per_channel_group( + w_int8: torch.Tensor, + scales: torch.Tensor, + zero_points: Optional[torch.Tensor], + quant_min: int, + quant_max: int, + dtype: torch.dtype, + group_size: int = 128, + output_dtype: torch.dtype = torch.float32, +): + """Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters + to map from floating point to quantized values. This means for each row of a 2-d Tensor + (M, N), we calculate scales/zero_points for each `group_size` elements + and quantize every `group_size` elements with the same quantization parameter. + The dimension for scales/zero_points will be (M * ceil(N, group_size),) + + Args: + input (torch.Tensor): quantized Tensor (uint8/int8 etc.) + scales (float32 torch.Tensor): quantization parameter for per channel group affine quantization + zero_points (int32 torch.Tensor): quantization parameter for per channel group affine quantization + quant_min (int): minimum quantized value for input Tensor + quant_max (int): maximum quantized value for input Tensor + dtype (torch.dtype): dtype (e.g. torch.uint8) for input Tensor + output_dtype (torch.dtype): dtype (e.g. torch.float32) for output Tensor + + Returns: + dequantized Tensor with dtype `output_dtype` + """ + + assert group_size > 1 + # needed for GPTQ single column dequantize + if group_size > w_int8.shape[-1] and scales.shape[-1] == 1: + group_size = w_int8.shape[-1] + assert w_int8.shape[-1] % group_size == 0 + assert w_int8.dim() == 2 + + w_int8_grouped = w_int8.reshape(-1, group_size) + scales = scales.reshape(-1, 1) + if zero_points is not None: + zp = zero_points.reshape(-1, 1) + else: + zp = torch.zeros([], dtype=torch.int32, device=scales.device) + w_dq = w_int8_grouped.sub(zp).mul(scales).reshape_as(w_int8).to(output_dtype) + return w_dq + + +quantized_decomposed_lib.define( + "fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, " + "int quant_min, int quant_max) -> Tensor") + +class FakeQuantPerChannel(torch.autograd.Function): + @staticmethod + def forward(ctx, input, scales, zero_points, axis, quant_min, quant_max): + if scales.dtype != torch.float32: + scales = scales.to(torch.float32) + if zero_points.dtype != torch.int32: + zero_points = zero_points.to(torch.int32) + assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}" + assert axis < input.dim(), f"Expecting axis to be < {input.dim()}" + broadcast_dims = list(range(0, axis)) + list(range(axis + 1, input.ndim)) + unsqueeze_scales = _unsqueeze_multiple(scales, broadcast_dims) + unsqueeze_zero_points = _unsqueeze_multiple(zero_points, broadcast_dims) + temp = torch.round(input * (1.0 / unsqueeze_scales)) + unsqueeze_zero_points + out = (torch.clamp(temp, quant_min, quant_max) - unsqueeze_zero_points) * unsqueeze_scales + mask = torch.logical_and((temp >= quant_min), (temp <= quant_max)) + + ctx.save_for_backward(mask) + return out + + @staticmethod + def backward(ctx, gy): + mask, = ctx.saved_tensors + return gy * mask, None, None, None, None, None + +@impl(quantized_decomposed_lib, "fake_quant_per_channel", "Autograd") +def fake_quant_per_channel( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, 
+) -> torch.Tensor: + return FakeQuantPerChannel.apply(input, scales, zero_points, axis, quant_min, quant_max) + +@impl(quantized_decomposed_lib, "fake_quant_per_channel", "Meta") +def fake_quant_per_channel_meta( + input: torch.Tensor, + scales: torch.Tensor, + zero_points: torch.Tensor, + axis: int, + quant_min: int, + quant_max: int, +) -> torch.Tensor: + return torch.empty_like(input) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..92620a169383423a7e392a6184a61d2de204278f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/_lower_to_native_backend.py @@ -0,0 +1,1177 @@ +# mypy: allow-untyped-defs +import torch +from torch.fx import map_arg, Node +from torch.fx.graph import Graph +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.intrinsic.quantized as nniq +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.quantized.reference as nnqr +from torch.ao.nn.quantized.modules.utils import WeightedQuantizedModule +from torch.fx import GraphModule +from .utils import ( + collect_producer_nodes, + get_linear_prepack_op_for_dtype, + get_new_attr_name_with_prefix, + get_qconv_prepack_op, + graph_module_from_producer_nodes, +) +from ..utils import _parent_name +from ..qconfig import QConfigAny +from ..quantization_mappings import get_quantized_operator +from .utils import create_node_from_old_node_preserve_meta +from typing import Dict, Tuple, Type, List, Callable, Any, Union, Set, Optional +import operator + +QOP_TO_ARG_NAMES_TO_SKIP = { + torch._ops.ops.quantized.hardswish: ['inplace'], + torch._ops.ops.quantized.elu: ['inplace'], + torch._ops.ops.quantized.dropout: ['inplace'], + torch._ops.ops.quantized.instance_norm: + ['running_mean', 'running_var', 'use_input_stats', 'momentum'], +} + +def _is_node_in_list(node, modules, func_list, method_list, module_type_list): + is_call_function = node.op == "call_function" and node.target in func_list + is_call_method = node.op == "call_method" and node.target in method_list + is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list + return is_call_function, is_call_method, is_call_module + +def is_fixed_qparams_node(node, modules): + func_list = [ + torch.nn.functional.hardsigmoid, + torch.nn.functional.sigmoid, + torch.sigmoid, + torch.tanh, + ] + method_list = [ + "hardsigmoid", + "hardsigmoid_", + "sigmoid", + "sigmoid_", + "tanh", + "tanh_", + ] + module_type_list = [ + torch.nn.Hardsigmoid, + torch.nn.Sigmoid, + torch.nn.Tanh, + torch.nn.Softmax, + ] + return _is_node_in_list(node, modules, func_list, method_list, module_type_list) + +def is_default_node(node, modules): + func_list = [ + torch.nn.functional.elu, + torch.nn.functional.hardswish, + torch.nn.functional.instance_norm, + torch.nn.functional.layer_norm, + torch.nn.functional.leaky_relu, + torch.nn.functional.dropout, + ] + method_list: List[Any] = [] + module_type_list = [ + nnqr.ConvTranspose1d, + nnqr.ConvTranspose2d, + nnqr.ConvTranspose3d, + torch.nn.ELU, + torch.nn.LeakyReLU, + torch.nn.Hardswish, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.LayerNorm, + torch.nn.Dropout, + torch.nn.PReLU, 
+ torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.ao.nn.intrinsic.BNReLU2d, + torch.ao.nn.intrinsic.BNReLU3d, + ] + return _is_node_in_list(node, modules, func_list, method_list, module_type_list) + +def is_copy_node(node, modules): + func_list = [ + torch.adaptive_avg_pool1d, + torch.nn.functional.adaptive_avg_pool2d, + torch.nn.functional.adaptive_avg_pool3d, + torch.nn.functional.hardtanh, + torch.nn.functional.hardtanh_, + torch.nn.functional.interpolate, + torch.nn.functional.max_pool1d, + torch.nn.functional.max_pool2d, + torch.nn.functional.max_pool3d, + torch.nn.functional.relu, + torch.nn.functional.relu6, + torch.avg_pool1d, + torch._C._nn.avg_pool2d, + torch._C._nn.avg_pool3d, + torch.clamp, + torch.flatten, + torch.mean, + operator.floordiv, + # F.channel_shuffle and torch.channel_shuffle are essentially the same thing + # so we only need to put one of them here + torch.channel_shuffle, + ] + method_list = [ + "clamp", + "mean", + "relu", + "relu_", + ] + module_type_list = [ + torch.nn.AdaptiveAvgPool1d, + torch.nn.AdaptiveAvgPool2d, + torch.nn.AdaptiveAvgPool3d, + torch.nn.AvgPool1d, + torch.nn.AvgPool2d, + torch.nn.AvgPool3d, + torch.nn.Hardtanh, + torch.nn.MaxPool1d, + torch.nn.MaxPool2d, + torch.nn.MaxPool3d, + torch.nn.ReLU, + torch.nn.ReLU6, + torch.nn.ChannelShuffle, + ] + return _is_node_in_list(node, modules, func_list, method_list, module_type_list) + +def is_general_tensor_shape_node(node, modules): + func_list = [ + torch.narrow, + torch.transpose, + torch.repeat_interleave, + torch.squeeze, + torch.stack, + torch.unsqueeze, + torch.nn.functional.pixel_shuffle, + torch.nn.functional.pixel_unshuffle, + ] + method_list = [ + "contiguous", + "detach", + "detach_", + "permute", + "repeat", + "repeat_interleave", + "reshape", + "resize_", + "shape", + "size", + "squeeze", + "squeeze_", + "transpose", + "unsqueeze", + "unsqueeze_", + "view", + ] + module_type_list = [ + torch.nn.Identity, + torch.nn.PixelShuffle, + torch.nn.PixelUnshuffle, + ] + return _is_node_in_list(node, modules, func_list, method_list, module_type_list) + +def is_other_node(node, modules): + func_list = [ + torch.cat, + ] + method_list: List[Any] = [] + module_type_list: List[Any] = [] + return _is_node_in_list(node, modules, func_list, method_list, module_type_list) + +def is_special_pattern_node(node, modules): + res_function, res_method, res_module = False, False, False + for checker in [is_fixed_qparams_node, is_default_node, is_copy_node, is_general_tensor_shape_node, is_other_node]: + is_call_function, is_call_method, is_call_module = checker(node, modules) + res_function = res_function or is_call_function + res_method = res_method or is_call_method + res_module = res_module or is_call_module + return res_function, res_method, res_module + +def is_dequantize_node(node): + return isinstance(node, Node) and node.op == "call_method" and node.target == "dequantize" + +def is_getattr_tensor_metadata_node(node): + return node.op == "call_function" and \ + node.target == getattr and \ + node.args[1] in ["shape"] + +def is_get_tensor_info_node(node): + return node.op == "call_method" and \ + node.target in ["shape", "size"] + +def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: Dict[str, QConfigAny]): + """ + Return True if the op is configured with a None qconfig, False otherwise. + Note: maybe need to generalize this to also check for the dtype, and we + only lower when dtype matches, but right now fbgemm/qnnpack only support + a single dtype, so it is OK for now. 
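+
+    Example (illustrative only; ``linear_node`` is a placeholder for a graph node
+    named "linear1" whose qconfig was explicitly set to None)::
+
+        >>> # xdoctest: +SKIP
+        >>> should_skip_lowering(linear_node, {"linear1": None})
+        True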
+ """ + return op.name in qconfig_map and qconfig_map[op.name] is None + +# Mapping from reference module class to the replacement static quantized module class for lowering +STATIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[WeightedQuantizedModule]] = { + nnqr.Linear: nnq.Linear, + nnqr.Conv1d: nnq.Conv1d, + nnqr.Conv2d: nnq.Conv2d, + nnqr.Conv3d: nnq.Conv3d, +} + +# Mapping from reference module class to the replacement dynamic quantized module class for lowering +DYNAMIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = { + nnqr.Linear: nnqd.Linear, + nnqr.GRUCell: nnqd.GRUCell, + nnqr.LSTMCell: nnqd.LSTMCell, + nnqr.RNNCell: nnqd.RNNCell, + nnqr.LSTM: nnqd.LSTM, + nnqr.GRU: nnqd.GRU, +} + +# Mapping from reference module class to the replacement weight only quantized module class for lowering +# TODO: correct the namespace for these modules +WEIGHT_ONLY_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = { + nnqr.Embedding: nnq.Embedding, + nnqr.EmbeddingBag: nnq.EmbeddingBag, +} + +# TODO: merge with STATIC_LOWER_MODULE_MAP after we merge +# _lower_static_weighted_ref_module and special_pattern_replacement +SPECIAL_PATTERN_LOWER_MODULE_MAP = { + nn.BatchNorm2d: nnq.BatchNorm2d, + nn.BatchNorm3d: nnq.BatchNorm3d, + nnqr.ConvTranspose1d: nnq.ConvTranspose1d, + nnqr.ConvTranspose2d: nnq.ConvTranspose2d, + nnqr.ConvTranspose3d: nnq.ConvTranspose3d, + nn.ELU: nnq.ELU, + nn.LeakyReLU: nnq.LeakyReLU, + nn.Hardswish: nnq.Hardswish, + nn.InstanceNorm1d: nnq.InstanceNorm1d, + nn.InstanceNorm2d: nnq.InstanceNorm2d, + nn.InstanceNorm3d: nnq.InstanceNorm3d, + nn.LayerNorm: nnq.LayerNorm, + nn.Dropout: nnq.Dropout, + nn.Softmax: nnq.Softmax, + nn.PReLU: nnq.PReLU, + nni.BNReLU2d: nniq.BNReLU2d, + nni.BNReLU3d: nniq.BNReLU3d, +} + +# Mapping from fused module class to a 2-tuple of: +# 1) The inner reference module class +# 2) The replacement static quantized module class for lowering +STATIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = { + nni.LinearReLU: (nnqr.Linear, nniq.LinearReLU), + # TODO: LinearLeakyReLU is registered as global but it is only fused and + # lowered when ondnn's backend config is used. Maybe need to separate + # registration and lowering functions for different backends in the future. + nni.LinearLeakyReLU: (nnqr.Linear, nniq.LinearLeakyReLU), + nni.LinearTanh: (nnqr.Linear, nniq.LinearTanh), + nni.ConvReLU1d: (nnqr.Conv1d, nniq.ConvReLU1d), + nni.ConvReLU2d: (nnqr.Conv2d, nniq.ConvReLU2d), + nni.ConvReLU3d: (nnqr.Conv3d, nniq.ConvReLU3d), +} + +# The difference between STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP and STATIC_LOWER_FUSED_MODULE_MAP: +# The refer node inside STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP has 2 inputs. 
+# Mapping from fused module class to a 2-tuple of: +# 1) The inner reference module class +# 2) The replacement static quantized module class for lowering +STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = { + nni.ConvAdd2d: (nnqr.Conv2d, nniq.ConvAdd2d), + nni.ConvAddReLU2d: (nnqr.Conv2d, nniq.ConvAddReLU2d), +} + +# Mapping from fused module class to a 2-tuple of: +# 1) The inner reference module class +# 2) The replacement dynamic quantized module class for lowering +DYNAMIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[nn.Module]]] = { + nni.LinearReLU: (nnqr.Linear, nniqd.LinearReLU), +} + +# Mapping from a functional to lower to a 2-tuple of +# 1) The quantized version of the op +# 2) The quantized version of the op fused with relu, if it exists, else None +STATIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Tuple[Callable, Optional[Callable]]] = { + F.linear: (torch.ops.quantized.linear, torch.ops.quantized.linear_relu), + F.conv1d: (torch.ops.quantized.conv1d, torch.ops.quantized.conv1d_relu), + F.conv2d: (torch.ops.quantized.conv2d, torch.ops.quantized.conv2d_relu), + F.conv3d: (torch.ops.quantized.conv3d, torch.ops.quantized.conv3d_relu), + F.conv_transpose1d: (torch.ops.quantized.conv_transpose1d, None), + F.conv_transpose2d: (torch.ops.quantized.conv_transpose2d, None), + F.conv_transpose3d: (torch.ops.quantized.conv_transpose3d, None), +} + +WEIGHT_PREPACK_OPS: Set[Callable] = { + torch._ops.ops.quantized.linear_prepack, + torch._ops.ops.quantized.linear_prepack_fp16, + torch._ops.ops.quantized.conv1d_prepack, + torch._ops.ops.quantized.conv2d_prepack, + torch._ops.ops.quantized.conv3d_prepack, + torch.ops.quantized.conv_transpose1d_prepack, + torch.ops.quantized.conv_transpose2d_prepack, + torch.ops.quantized.conv_transpose3d_prepack, +} + +# Mapping from a functional to a dictionary, where the key is a 2-tuple of +# (input_activation_dtype, weight_dtype) and the value is a 2-tuple of +# 1) The dynamically quantized version of the op +# 2) The dynamically quantized version of the op fused with relu, if it exists, else None +DYNAMIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Dict[Tuple[torch.dtype, torch.dtype], Tuple[Callable, Optional[Callable]]]] = { + F.linear: { + (torch.quint8, torch.qint8): (torch.ops.quantized.linear_dynamic, + torch.ops.quantized.linear_relu_dynamic), + (torch.float16, torch.float16): (torch.ops.quantized.linear_dynamic_fp16, + torch.ops.quantized.linear_relu_dynamic_fp16) + }, + # dynamic conv + relu is not available yet + F.conv1d: { + (torch.quint8, torch.qint8): (torch.ops.quantized.conv1d_dynamic, None), + }, + F.conv2d: { + (torch.quint8, torch.qint8): (torch.ops.quantized.conv2d_dynamic, None), + }, + F.conv3d: { + (torch.quint8, torch.qint8): (torch.ops.quantized.conv3d_dynamic, None), + }, +} + +CONV_FUNCTIONAL_OPS: Set[Callable] = { + F.conv1d, + F.conv2d, + F.conv3d, +} + +CONV_TRANSPOSE_FUNCTIONAL_OPS: Set[Callable] = { + F.conv_transpose1d, + F.conv_transpose2d, + F.conv_transpose3d, +} + +# TODO: add tests for lowering these ops +QBIN_OP_MAPPING: Dict[Union[Callable, str], Callable] = { + operator.add: torch.ops.quantized.add, + torch.add: torch.ops.quantized.add, + operator.mul: torch.ops.quantized.mul, + operator.matmul: torch.ops.quantized.matmul, + torch.mul: torch.ops.quantized.mul, + torch.matmul: torch.ops.quantized.matmul, +} +QBIN_RELU_OP_MAPPING: Dict[Union[Callable, str], Callable] = { + operator.add: torch.ops.quantized.add_relu, + 
torch.add: torch.ops.quantized.add_relu, + operator.mul: torch.ops.quantized.mul_relu, + torch.mul: torch.ops.quantized.mul_relu, +} + +def _save_packed_weight(self, destination, prefix, keep_vars): + for attr_name in dir(self): + if "_packed_weight" in attr_name and \ + isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined] + packed_weight = getattr(self, attr_name) + destination[prefix + attr_name] = packed_weight + +def _load_packed_weight(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + attrs_to_pop = [] + for attr_name in state_dict: + if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950 + setattr(self, attr_name, state_dict[attr_name]) + attrs_to_pop.append(attr_name) + + # pop the packed param attributes + for attr_name in attrs_to_pop: + state_dict.pop(attr_name) + +def fold_weight( + quantized_model: GraphModule, + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ + Trace back from the weight node until we hit getattr, reconstruct the + graph module with the traced nodes and run the graph module to pack the + weight, then replace the original chain of ops with the packed weight. + """ + packed_weights = {} + # map from folded node name to the prepacked weight name + folded_nodes = {} + # get packed weights + for node in quantized_model.graph.nodes: + if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS: + nodes_to_fold = collect_producer_nodes(node) + if nodes_to_fold is not None: + for node_to_fold in nodes_to_fold: + folded_nodes[node_to_fold.name] = node + + prepacking_module = graph_module_from_producer_nodes( + quantized_model, nodes_to_fold) + packed_weight = prepacking_module() + packed_weights[node.name] = packed_weight + + # remove folded nodes and replace the prepacking node with getattr + folded_graph = Graph() + env: Dict[Any, Any] = {} + + def load_arg(a): + return map_arg(a, lambda node: env[node.name]) + + for node in quantized_model.graph.nodes: + prepack_node = folded_nodes.get(node.name, None) + if prepack_node is node: + packed_weight = packed_weights[node.name] + # add a prepacked attribute to root + op_node = next(iter(prepack_node.users)) + module_path, _ = node_name_to_scope[op_node.name] + get_new_packed_weight_name = \ + get_new_attr_name_with_prefix(module_path + '_packed_weight_') + packed_weight_name = get_new_packed_weight_name(quantized_model) + setattr(quantized_model, packed_weight_name, packed_weight) + # replace prepack node with a getattr node + env[node.name] = folded_graph.create_node( + 'get_attr', packed_weight_name, (), {}) + elif prepack_node is not None: + # remove the folded node + continue + else: + # copy other nodes + env[node.name] = folded_graph.node_copy(node, load_arg) + + quantized_model = GraphModule(quantized_model, folded_graph) + quantized_model._register_state_dict_hook(_save_packed_weight) + quantized_model._register_load_state_dict_pre_hook(_load_packed_weight, with_module=True) + return quantized_model + +def _get_module(node: Node, modules: Dict[str, nn.Module]) -> Optional[nn.Module]: + """ + Return the `torch.nn.Module` that corresponds to the specified node's target. + If no such module exists, return None.
+ """ + if node.op == "call_module" and str(node.target) in modules: + return modules[str(node.target)] + else: + return None + +def _match_static_pattern( + node: Node, + modules: Dict[str, nn.Module], + qconfig_map: Dict[str, QConfigAny], + matching_modules_or_ops: List[Callable], + dequantize_node_arg_indices: List[int] +) -> Union[Tuple[Node, Node, Node], Tuple[None, None, None]]: + """ + Match the pattern (dequantize - ref node - quantize) against the node provided. + + If there is a match, return a 3-tuple of: + 1) q_node: the quantize node, + 2) relu_node: a relu node wrapping the ref_node, and + 3) ref_node: a reference module or functional node to replace with its quantized counterpart + Otherwise, if there is no match, return a 3-tuple of (None, None, None). + + Parameters: + node: The `torch.fx.Node` to match against. + modules: A mapping from node names to modules in the model graph, used for module lookup. + qconfig_map: A mapping from node names to the qconfigs associated with the nodes. + If the corresponding qconfig for the reference node is None, then return no match. + matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s. + If the reference node is not in this list, then return no match. + dequantize_node_arg_indices: A list of indices in the reference node args where dequantize + nodes may be present. An empty list means skipping the check for dequantize nodes. + """ + SKIP_LOWERING_VALUE = (None, None, None) + + # Match quantize node + if node.op != "call_function" or node.target != torch.quantize_per_tensor: + return SKIP_LOWERING_VALUE + q_node = node + ref_node = q_node.args[0] + assert isinstance(ref_node, Node) + + # Handle cases where the node is wrapped in a ReLU + if (ref_node.op == "call_function" and ref_node.target in (F.relu, torch.relu)) or\ + (ref_node.op == "call_module" and type(_get_module(ref_node, modules)) == nn.ReLU): + relu_node = ref_node + ref_node = relu_node.args[0] + assert isinstance(ref_node, Node) + else: + relu_node = None + if should_skip_lowering(ref_node, qconfig_map): + return SKIP_LOWERING_VALUE + + # Match reference module or functional + if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module): + expected_op = "call_module" + match_key = type(_get_module(ref_node, modules)) + else: + expected_op = "call_function" + match_key = ref_node.target + if ref_node.op != expected_op or match_key not in matching_modules_or_ops: + return SKIP_LOWERING_VALUE + + # Match dequantize node(s). Both of the following conditions must pass: + # (1) All `torch.fx.Node`s at the matching indices must be a dequantize node + # (2) There must be at least one dequantize node + matched_dequantize = False + for i in dequantize_node_arg_indices: + assert i < len(ref_node.args), \ + f"Dequantize index {i} exceeded reference node's arg length {len(ref_node.args)}" + arg = ref_node.args[i] + if is_dequantize_node(arg): + matched_dequantize = True + elif isinstance(arg, Node): + return SKIP_LOWERING_VALUE + if not matched_dequantize: + return SKIP_LOWERING_VALUE + + return (q_node, relu_node, ref_node) + +def _match_static_pattern_with_two_inputs( + node: Node, + modules: Dict[str, nn.Module], + qconfig_map: Dict[str, QConfigAny], + matching_modules_or_ops: List[Callable] +) -> Union[Tuple[Node, Node], Tuple[None, None]]: + """ + (dequantize \ + Match the pattern (dequantize - ref node - quantize) against the node provided. 
+ + If there is a match, return a 2-tuple of: + 1) q_node: the quantize node, + 2) ref_node: a reference module or functional node to replace with its quantized counterpart + Otherwise, if there is no match, return a 2-tuple of (None, None). + + Parameters: + node: The `torch.fx.Node` to match against. + modules: A mapping from node names to modules in the model graph, used for module lookup. + qconfig_map: A mapping from node names to the qconfigs associated with the nodes. + If the corresponding qconfig for the reference node is None, then return no match. + matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s. + If the reference node is not in this list, then return no match. + """ + SKIP_LOWERING_VALUE = (None, None) + + # Match quantize node + if node.op != "call_function" or node.target != torch.quantize_per_tensor: + return SKIP_LOWERING_VALUE + q_node = node + ref_node = q_node.args[0] + assert isinstance(ref_node, Node) + + if should_skip_lowering(ref_node, qconfig_map): + return SKIP_LOWERING_VALUE + + # Match reference module or functional + if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module): + expected_op = "call_module" + match_key = type(_get_module(ref_node, modules)) + else: + # This pass only supports ops of type "call_module" + return SKIP_LOWERING_VALUE + + if ref_node.op != expected_op or match_key not in matching_modules_or_ops: + return SKIP_LOWERING_VALUE + + # Check that ref_node has 2 input nodes, both of which are dequantize nodes. + if len(ref_node.args) != 2: + return SKIP_LOWERING_VALUE + for i in range(len(ref_node.args)): + arg = ref_node.args[i] + if not is_dequantize_node(arg): + return SKIP_LOWERING_VALUE + + return (q_node, ref_node) + +def _lower_static_weighted_ref_module( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and find dequantize - ref module - quantize patterns + and replace them with the quantized version of the ref module.
+ """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + matching_modules = list(STATIC_LOWER_MODULE_MAP.keys()) + list(STATIC_LOWER_FUSED_MODULE_MAP.keys()) + (q_node, relu_node, ref_node) = _match_static_pattern( + n, modules, qconfig_map, matching_modules, dequantize_node_arg_indices=[0]) # type: ignore[arg-type] + if q_node is None: + continue + assert ref_node is not None + (_, scale_node, zero_point_node, _) = q_node.args + ref_module = _get_module(ref_node, modules) + ref_class = type(ref_module) + assert isinstance(scale_node, Node) + assert isinstance(zero_point_node, Node) + assert issubclass(ref_class, nn.Module) + + # Step 1: Change this pattern to use the corresponding quantized module + # For fused modules, we also check whether the inner module is a reference module + # If so, we replace the entire fused module with the corresponding quantized module + if ref_class in STATIC_LOWER_FUSED_MODULE_MAP: + inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: # type: ignore[index] + continue + else: + q_class = STATIC_LOWER_MODULE_MAP[ref_class] + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + q_module = q_class.from_reference(ref_module, output_scale, output_zero_point) + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, q_module) + + # Step 2: Reroute around dq_node, and remove q_node and its args + assert len(ref_node.args) == 1 + dq_node = ref_node.args[0] + assert isinstance(dq_node, Node) + ref_node.replace_input_with(dq_node, dq_node.args[0]) + q_node.replace_all_uses_with(ref_node) + model.graph.erase_node(q_node) + model.graph.erase_node(scale_node) + model.graph.erase_node(zero_point_node) + +def _lower_static_weighted_ref_module_with_two_inputs( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and find patterns + dequantize dequantize + \\ // + ref module + \\ + quantize + and replace them with the quantized version of the ref module. 
+ """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # (dequantize \ + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + matching_modules = list(STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP.keys()) + (q_node, ref_node) = _match_static_pattern_with_two_inputs( + n, modules, qconfig_map, matching_modules) # type: ignore[arg-type] + if q_node is None: + continue + assert ref_node is not None + (_, scale_node, zero_point_node, _) = q_node.args + ref_module = _get_module(ref_node, modules) + ref_class = type(ref_module) + assert isinstance(scale_node, Node) + assert isinstance(zero_point_node, Node) + assert issubclass(ref_class, nn.Module) + + # Step 1: Change this pattern to use the corresponding quantized module + # For fused modules, we also check whether the inner module is a reference module + # If so, we replace the entire fused module with the corresponding quantized module + if ref_class in STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP: + inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_TWO_INPUTS_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: # type: ignore[index] + continue + else: + continue + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + q_module = q_class.from_reference(ref_module, output_scale, output_zero_point) + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, q_module) + + # Step 2: Reroute around dq_node, and remove q_node and its args + assert len(ref_node.args) == 2 + for arg in ref_node.args: + if not is_dequantize_node(arg): + continue + dq_node = arg + assert isinstance(dq_node, Node) + ref_node.replace_input_with(dq_node, dq_node.args[0]) + + q_node.replace_all_uses_with(ref_node) + model.graph.erase_node(q_node) + model.graph.erase_node(scale_node) + model.graph.erase_node(zero_point_node) + +def _lower_dynamic_weighted_ref_module(model: GraphModule): + """ + Traverse the graph and find quantize_per_tensor_dynamic - dequantize - ref_module patterns + and replace them with the dynamically quantized version of the ref module. 
+ """ + named_modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + if n.op != "call_module" or \ + type(named_modules[str(n.target)]) not in \ + set(DYNAMIC_LOWER_MODULE_MAP.keys()).union( + set(DYNAMIC_LOWER_FUSED_MODULE_MAP.keys())): + continue + ref_node = n + dq_node = ref_node.args[0] + if dq_node.op != "call_method" or dq_node.target != "dequantize": + continue + + input_dynamic_q_node = dq_node.args[0] + + if input_dynamic_q_node.op != "call_function" or \ + input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic: + continue + + activation_dtype = input_dynamic_q_node.args[1] + is_fp16 = activation_dtype == torch.float16 + is_int8 = activation_dtype in [torch.quint8, torch.qint8] + if not is_int8 and not is_fp16: + continue + + ref_module = named_modules[str(ref_node.target)] + ref_class = type(ref_module) + if ref_class in DYNAMIC_LOWER_FUSED_MODULE_MAP: + inner_ref_class, q_class = DYNAMIC_LOWER_FUSED_MODULE_MAP[ref_class] + if type(ref_module[0]) != inner_ref_class: + continue + else: + q_class = DYNAMIC_LOWER_MODULE_MAP.get(ref_class) # type: ignore[assignment] + # TODO: maybe define a WeightedDynamicallyQuantizedModule + q_module = q_class.from_reference(ref_module) # type: ignore[attr-defined] + + # replace reference module with dynamically quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(named_modules[parent_name], module_name, q_module) + ref_node.replace_input_with(dq_node, input_dynamic_q_node.args[0]) + +def _lower_weight_only_weighted_ref_module(model: GraphModule): + """ + Traverse the graph and find ref_module patterns + and replace them with the weight only quantized version of the ref module. + """ + named_modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + if n.op != "call_module" or \ + type(named_modules[str(n.target)]) not in \ + set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()): + continue + ref_node = n + ref_module = named_modules[str(ref_node.target)] + ref_class = type(ref_module) + q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class) + # TODO: WeightedQuantizedModule is currently assuming static quant apis + # with output_scale, output_zero_point in from_reference, we may want to + # relax that, or rename this + # TODO: maybe define a WeightedWeightOnlyQuantizedModule + q_module = q_class.from_reference(ref_module) # type: ignore[union-attr] + + # replace reference module with dynamically quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(named_modules[parent_name], module_name, q_module) + +def _lower_static_weighted_ref_functional( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and replace functional reference patterns with their quantized versions. 
+ """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - functional op - quantize) + matching_ops = list(STATIC_LOWER_FUNCTIONAL_MAP.keys()) + (q_node, relu_node, func_node) = _match_static_pattern( + n, modules, qconfig_map, matching_ops, dequantize_node_arg_indices=[0, 1]) + if q_node is None: + continue + assert func_node is not None + (_, output_scale_node, output_zp_node, _) = q_node.args + (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args + assert isinstance(output_zp_node, Node) + assert isinstance(input_dq_node, Node) + assert isinstance(weight_dq_node, Node) + quantized_weight = weight_dq_node.args[0] + assert isinstance(quantized_weight, Node) + if quantized_weight.op != "call_function" or\ + quantized_weight.target not in (torch.quantize_per_tensor, torch.quantize_per_channel): + continue + + # Step 1: Replace quantized weights with packed weights, which will be folded later + # Use the right prepack op and prepare the corresponding args + # Linear prepack args: (quantized weights[, bias]) + # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups]) + prepack_args = [quantized_weight] + remaining_func_args + if func_node.target == F.linear: + weight_dtype = quantized_weight.args[-1] + prepack_op = get_linear_prepack_op_for_dtype(weight_dtype) + elif func_node.target in CONV_FUNCTIONAL_OPS: + prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type] + # For conv1d, the stride, padding, and dilation args may be ints, + # in which case we need to convert them to tuples + if func_node.target == F.conv1d: + for i in [2, 3, 4]: + if len(prepack_args) > i and isinstance(prepack_args[i], int): + prepack_args[i] = (prepack_args[i],) + elif func_node.target in CONV_TRANSPOSE_FUNCTIONAL_OPS: + prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type] + # For conv_transpose1d, the stride, padding, and dilation args may be ints, + # in which case we need to convert them to tuples + if func_node.target == F.conv_transpose1d: + # Note prepack_args[5] is groups. + for i in [2, 3, 4, 6]: + if len(prepack_args) > i and isinstance(prepack_args[i], int): + prepack_args[i] = (prepack_args[i],) + # swap dilation and groups + # prepack op has arguments: {w, b, stride, padding, output_padding, dilation, groups} + # transposed conv op has arguments: {x, w, b, stride, padding, output_padding, groups, dilation} + if (len(prepack_args) > 6): + prepack_args[5], prepack_args[6] = prepack_args[6], prepack_args[5] + else: + raise ValueError(f"Lowering is not supported for op '{func_node.target}'") + with model.graph.inserting_before(output_scale_node): + # kwargs of the func node are needed for prepack op (i.e., quantized::linear_prepack) + # They are not needed for compute op (i.e., quantized::linear) + kwargs = func_node.kwargs + # F.linear uses 'bias' key for bias while qlinear_prepack uses 'B' for bias + if func_node.target == F.linear and 'bias' in kwargs: + kwargs = kwargs.copy() + kwargs['B'] = kwargs['bias'] + del kwargs['bias'] + packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), kwargs) + + # Step 2: Replace reference pattern with the corresponding quantized op + (q_func, q_relu_func) = STATIC_LOWER_FUNCTIONAL_MAP[func_node.target] # type: ignore[index] + # conv_transpose does not support fusion with relu yet. 
q_relu_func is None in such cases + if q_relu_func is not None: + func_node.target = q_relu_func if relu_node is not None else q_func + else: + func_node.target = q_func + func_node.args = (input_dq_node.args[0], packed_weight, output_scale_node, output_zp_node) + # kwargs for func_node have been moved to kwargs for prepack op + func_node.kwargs = {} + q_node.replace_all_uses_with(func_node) + # Move func_node after output_zp_node in the graph + output_zp_node.append(func_node) + + # Clean up: Remove quantize node, and the relu node if it exists + model.graph.erase_node(q_node) + if relu_node is not None and q_relu_func is not None: + model.graph.erase_node(relu_node) + +def _lower_dynamic_weighted_ref_functional( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + """ + Traverse the graph and replace functional reference patterns with their dynamically + quantized versions. + Examples: + quantize_per_tensor_dynamic - dequantize - functional linear --> linear_dynamic + to(torch.float16) - dequantize - functional linear --> linear_dynamic_fp16 + """ + modules = dict(model.named_modules(remove_duplicate=False)) + nodes = list(model.graph.nodes) + # we want to search in reversed order so that we can match the larger patterns first + # e.g. we want to match linear - relu before linear. + for n in reversed(model.graph.nodes): + + # Step 0: Find nodes that match this pattern + # (quantize_per_tensor_dynamic - dequantize - dynamically quantized op) + # We search for the pattern backwards, starting with the quantize node + # Quantize node args: (func, scale, zp, dtype) + func_node = n + # Handle cases where the functional op is wrapped in a ReLU + if func_node.op == "call_function" and func_node.target == F.relu or \ + func_node.op == "call_module" and \ + type(modules[str(func_node.target)]) == torch.nn.ReLU: + relu_node = func_node + func_node = relu_node.args[0] + else: + relu_node = None + if should_skip_lowering(func_node, qconfig_map): + continue + # Linear args: (dequantized inputs, dequantized weights[, bias]) + # Conv args: (dequantized inputs, dequantized weights[, bias, stride, padding, dilation, groups]) + if func_node.op != "call_function" or func_node.target not in DYNAMIC_LOWER_FUNCTIONAL_MAP: + continue + (input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args + if input_dq_node.op != "call_method" or input_dq_node.target != "dequantize" or \ + weight_dq_node.op != "call_method" or weight_dq_node.target != "dequantize": + continue + + input_dynamic_q_node = input_dq_node.args[0] + + if input_dynamic_q_node.op != "call_function" or \ + input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic: + continue + + reduce_range_node = None + (pattern_input, activation_dtype, reduce_range_node) = input_dynamic_q_node.args + is_fp16 = activation_dtype == torch.float16 + is_int8 = activation_dtype in [torch.quint8, torch.qint8] + if not is_int8 and not is_fp16: + continue + + quantized_weight = weight_dq_node.args[0] + weight_dtype = quantized_weight.args[-1] + + # Step 1: Try to select reference pattern with the corresponding quantized op + dynamic_quant_dtype_key = (activation_dtype, weight_dtype) + if dynamic_quant_dtype_key not in DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target]: + print(f"Didn't find dtype combination {dynamic_quant_dtype_key} during " + f"dynamic quantized op lowering for {func_node.target}") + continue + (q_func, q_relu_func) = DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target][dynamic_quant_dtype_key] + + if q_func is None or q_relu_func is
None: + print("Didn't find corresponding quantized function or quantized relu function " + f"for {func_node.target}, {dynamic_quant_dtype_key}") + continue + + # Step 2: Replace quantized weights with packed weights, which will be folded later + # Use the right prepack op and prepare the corresponding args + # Linear prepack args: (quantized weights[, bias]) + # Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups]) + prepack_args = [quantized_weight] + remaining_func_args + prepack_kwargs = {} + if func_node.target == F.linear: + prepack_op = get_linear_prepack_op_for_dtype(weight_dtype) + kwargs = func_node.kwargs.copy() + if 'bias' in kwargs: + prepack_kwargs['B'] = kwargs['bias'] + del kwargs['bias'] + func_node.kwargs = kwargs + elif func_node.target in CONV_FUNCTIONAL_OPS: + prepack_op = get_qconv_prepack_op(func_node.target) + # For conv1d, the stride, padding, and dilation args may be ints, + # in which case we need to convert them to tuples + if func_node.target == F.conv1d: + for i in [2, 3, 4]: + if len(prepack_args) > i and isinstance(prepack_args[i], int): + prepack_args[i] = (prepack_args[i],) + else: + raise ValueError(f"Lowering is not supported for op '{func_node.target}'") + with model.graph.inserting_before(func_node): + packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), prepack_kwargs) + + # Step 3: Replace reference pattern with the corresponding quantized op + func_node.target = q_relu_func if relu_node is not None else q_func + if is_int8: + func_node.args = (pattern_input, packed_weight, reduce_range_node) + else: + func_node.args = (pattern_input, packed_weight) + + if relu_node is not None: + relu_node.replace_all_uses_with(func_node) + + # Step 4: Remove the relu node if it exists + if relu_node is not None: + model.graph.erase_node(relu_node) + +def _lower_quantized_binary_op( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny]): + binary_ops_to_lower: List[Callable] = [operator.add, torch.add, operator.mul, torch.mul, torch.matmul] + modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + # Step 0: Find nodes that match this pattern (dequantize - ref module - quantize) + (q_node, relu_node, bop_node) = _match_static_pattern( + n, modules, qconfig_map, binary_ops_to_lower, dequantize_node_arg_indices=[0, 1]) + if q_node is None: + continue + assert bop_node is not None + (_, scale_node, zero_point_node, _) = q_node.args + + # Step 1: Remove dequant nodes + num_dq_nodes = 0 + for arg in bop_node.args: + if not is_dequantize_node(arg): + continue + dq_node = arg + assert isinstance(dq_node, Node) + dn_input = dq_node.args[0] + bop_node.replace_input_with(dq_node, dn_input) + num_dq_nodes += 1 + assert num_dq_nodes > 0 + + # Step 2: Swap binary op to quantized binary op + assert bop_node.target in QBIN_OP_MAPPING + binop_to_qbinop = QBIN_OP_MAPPING if relu_node is None else QBIN_RELU_OP_MAPPING + qbin_op = binop_to_qbinop[bop_node.target] + # prepare the args for quantized binary op + # (x, y) + qop_node_args = list(bop_node.args) + # (x, y, scale, zero_point) + # add scale and zero_point arguments for Tensor - Tensor operation + if num_dq_nodes == 2: + qop_node_args.extend([scale_node, zero_point_node]) + # insert a call to quantized binary op and remove the original binary op + with model.graph.inserting_after(q_node): + qop_node = create_node_from_old_node_preserve_meta( + model.graph, + ("call_function", qbin_op, tuple(qop_node_args), {}), + bop_node) + 
q_node.replace_all_uses_with(qop_node) + + # Step 3: Remove quantize node, binary op node, and relu node if any + model.graph.erase_node(q_node) + if relu_node is not None: + model.graph.erase_node(relu_node) + model.graph.erase_node(bop_node) + +def special_pattern_replacement(model: GraphModule): + modules = dict(model.named_modules(remove_duplicate=False)) + for n in model.graph.nodes: + q_node = n + is_quantize = q_node.target == torch.quantize_per_tensor + is_to_fp16 = q_node.op == "call_method" and q_node.target == "to" and \ + len(q_node.args) == 2 and q_node.args[1] == torch.float16 + if not (is_quantize or is_to_fp16): + continue + ref_node = q_node.args[0] + # get output scale/zero_point/dtype from the quantize node + # ref_node, scale_node, zero_point_node, dtype = q_node.args + # TODO: add safety checks that users for the ref_node and dq_node needs to be one + is_call_function, is_call_method, is_call_module = is_fixed_qparams_node(ref_node, modules) + if is_to_fp16 and (is_call_function or is_call_method or is_call_module): + # TODO: add a warning or error out here? (bc-breaking if error out) + # warnings.warn( + # "Only reference patterns are currently supported for {dtype} dtype with {op} op" + # "".format(dtype=dtypes, op=ref_node)) + continue + + is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules) + if is_to_fp16 and (is_call_function or is_call_method or is_call_module): + # TODO: add a warning or error out here? (bc-breaking if error out) + continue + + # This check includes all supported ops + is_call_function, is_call_method, is_call_module = is_special_pattern_node(ref_node, modules) + if not (is_call_module or is_call_function or is_call_method): + continue + assert len(ref_node.args) > 0 or len(ref_node.kwargs) > 0 + dq_node_or_nodes = ref_node.args[0] if len(ref_node.args) > 0 else next(iter(ref_node.kwargs.values())) + assert isinstance(dq_node_or_nodes, (Node, tuple, list)) + is_dequantize = False + if isinstance(dq_node_or_nodes, Node): + is_dequantize = dq_node_or_nodes.op == 'call_method' and \ + dq_node_or_nodes.target == 'dequantize' + elif isinstance(dq_node_or_nodes, (tuple, list)): + is_dequantize = all( + x.op == 'call_method' and x.target == 'dequantize' + for x in dq_node_or_nodes) + + if not is_dequantize: + continue + + # TODO: enable we have patterns that needs to swap the modules + if is_call_module: + ref_module = modules[ref_node.target] + if type(ref_module) in SPECIAL_PATTERN_LOWER_MODULE_MAP and is_quantize: + qmodule_cls = SPECIAL_PATTERN_LOWER_MODULE_MAP.get(type(ref_module)) + scale_node = q_node.args[1] + zero_point_node = q_node.args[2] + output_scale = getattr(model, scale_node.target) + output_zero_point = getattr(model, zero_point_node.target) + + qmodule = qmodule_cls.from_reference(ref_module, output_scale, output_zero_point) # type:ignore[union-attr] + # replace reference module with quantized module + parent_name, module_name = _parent_name(ref_node.target) + setattr(modules[parent_name], module_name, qmodule) + + # reroute around dq node: + dq_nodes: List[Node] = [] + if isinstance(dq_node_or_nodes, Node): + dq_nodes = [dq_node_or_nodes] + elif isinstance(dq_node_or_nodes, (tuple, list)): + dq_nodes = list(dq_node_or_nodes) + + for dq_node in dq_nodes: + dn_input = dq_node.args[0] + ref_node.replace_input_with(dq_node, dn_input) + + # store q node args + qnode_qparams = list(q_node.args)[1:] + # replace uses of q node with input and remove q node + q_node_input = q_node.args[0] + 
q_node.replace_all_uses_with(q_node_input) + model.graph.erase_node(q_node) + + is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules) + if is_call_function: + # pass scale/zero_point arguments from quantize_per_tensor to the default node operator + # insert an op after the zero_point node so that the scale/zero_point + # nodes are available + qop = get_quantized_operator(ref_node.target) + args = list(ref_node.args) + kwargs = dict(ref_node.kwargs) + if qop in QOP_TO_ARG_NAMES_TO_SKIP: + args_to_skip = QOP_TO_ARG_NAMES_TO_SKIP[qop] + for arg in args_to_skip: + if arg in kwargs: + kwargs.pop(arg) + kwargs["output_scale"] = qnode_qparams[0] + kwargs["output_zero_point"] = qnode_qparams[1] + with model.graph.inserting_after(qnode_qparams[1]): + qop_node = create_node_from_old_node_preserve_meta( + model.graph, + ("call_function", qop, tuple(args), kwargs), + ref_node) + ref_node.replace_all_uses_with(qop_node) + model.graph.erase_node(ref_node) + else: + # remove scale/zero_point node for quantize node + for n in qnode_qparams: + if isinstance(n, Node): + model.graph.erase_node(n) + + return model + +def _lower_getattr_tensor_metadta_op(model: GraphModule): + """ Modifies the graph of the model in place, to skip the extra dequantize op before + the general tensor shape ops when possible + """ + for n in model.graph.nodes: + if is_getattr_tensor_metadata_node(n): + maybe_dq = n.args[0] + if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize": + continue + # skip the dequantize node + args = list(n.args) + args[0] = n.args[0].args[0] + n.args = tuple(args) + +def _lower_get_tensor_info_op(model: GraphModule): + """ Modifies the graph of the model in place, to skip the extra dequantize op before + the general tensor shape ops when possible + """ + for n in model.graph.nodes: + if not is_get_tensor_info_node(n): + continue + maybe_dq = n.args[0] + if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize": + continue + # skip the dequantize node + args = list(n.args) + args[0] = n.args[0].args[0] + n.args = tuple(args) + +def _lower_to_native_backend( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ Lower a quantized reference model (with reference quantized operator patterns) + to the native backend in PyTorch (fbgemm/qnnpack), both backends share the same + operator signatures so they can be lowered with the same function + """ + _lower_static_weighted_ref_module(model, qconfig_map) + _lower_static_weighted_ref_module_with_two_inputs(model, qconfig_map) + _lower_dynamic_weighted_ref_module(model) + _lower_weight_only_weighted_ref_module(model) + _lower_static_weighted_ref_functional(model, qconfig_map) + _lower_dynamic_weighted_ref_functional(model, qconfig_map) + _lower_quantized_binary_op(model, qconfig_map) + _lower_getattr_tensor_metadta_op(model) + _lower_get_tensor_info_op(model) + special_pattern_replacement(model) + model.graph.eliminate_dead_code() + model = fold_weight(model, node_name_to_scope) + model.graph.eliminate_dead_code() + model.recompile() + model.graph.lint() + return model diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca622cc4171a06287930723eefe3c882985af5b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/convert.py @@ -0,0 +1,1143
@@ +# mypy: ignore-errors + +from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type, Callable +from torch.ao.quantization.quant_type import QuantType +import torch +import copy +import warnings +from torch.fx import ( + GraphModule, +) +from torch.fx.graph import ( + Graph, + Node, + Argument, +) +from ..utils import ( + activation_is_statically_quantized, + weight_is_quantized, + get_qparam_dict, + _parent_name, + get_swapped_custom_module_class, +) +from ..qconfig import ( + QConfigAny, + qconfig_equals +) +from ..qconfig_mapping import QConfigMapping +from .qconfig_mapping_utils import ( + _generate_node_name_to_qconfig, + _compare_prepare_convert_qconfig_mappings, + _update_qconfig_for_fusion, + _is_qconfig_supported_by_dtype_configs, + _update_qconfig_for_qat, +) +from torch.ao.quantization.backend_config.utils import ( + get_root_module_to_quantized_reference_module, + get_pattern_to_dtype_configs, + get_fused_module_classes, + get_qat_module_classes, +) +from torch.ao.quantization.backend_config import ( + BackendConfig, + get_native_backend_config, +) +from torch.ao.quantization.observer import _is_activation_post_process +from .graph_module import ( + _is_observed_module, + _is_observed_standalone_module, +) +from ._equalize import update_obs_for_equalization, convert_eq_obs +from torch.nn.utils.parametrize import type_before_parametrizations +from .utils import ( + _get_module, + _is_custom_module_lstm, + _is_custom_module_mha, + assert_and_get_unique_device, + get_custom_module_class_keys, + create_getattr_from_value, + collect_producer_nodes, + graph_module_from_producer_nodes, + node_arg_is_weight, +) +from torch.ao.quantization.utils import ( + is_per_channel, + to_underlying_dtype, +) +from torch.ao.quantization.quantize import ( + _remove_qconfig, +) +from torch.ao.quantization.stubs import DeQuantStub +from .custom_config import ( + ConvertCustomConfig, + PrepareCustomConfig, +) +from .lower_to_fbgemm import lower_to_fbgemm +# importing the lib so that the quantized_decomposed ops are registered +from ._decomposed import quantized_decomposed_lib # noqa: F401 +import operator + +__all__ = [ + "convert", + "convert_custom_module", + "convert_standalone_module", + "convert_weighted_module", +] + +SUPPORTED_QDTYPES = [ + torch.quint8, + torch.qint8, + torch.qint32, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.float8_e5m2, + torch.float8_e4m3fn, +] + +_QSCHEME_TO_CHOOSE_QPARAMS_OP = { + torch.per_tensor_affine: torch.ops.quantized_decomposed.choose_qparams.tensor, + torch.per_tensor_symmetric: torch.ops.quantized_decomposed.choose_qparams_symmetric.tensor, +} + +def _replace_observer_with_quantize_dequantize_node_decomposed( + model: torch.fx.GraphModule, + node: Node, + modules: Dict[str, torch.nn.Module], + node_name_to_scope: Dict[str, Tuple[str, type]], + node_name_to_qconfig: Dict[str, QConfigAny]) -> None: + """ Replace activation_post_process module call node with quantize and + dequantize node working with decomposed Tensor + + Before: + ... -> observer_0(x) -> ... + After: + ... -> torch.ops.quantized_decomposed.quantize_per_tensor(x, ...) -> + torch.ops.quantized_decomposed.dequantize_per_tensor() -> ... 
+ + or quantize_per_channel and dequantize_per_channel + """ + graph = model.graph + assert modules is not None + assert isinstance(node.target, str) + module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig) + activation_post_process = modules[node.target] + if hasattr(activation_post_process, "convert"): + activation_post_process.convert(model, node) + return + # skip replacing observers to quant/dequant nodes if the qconfigs of all + # consumers and producers of this observer are None + skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in + list(node.args) + list(node.users.keys())) + if skip_replacement or not _is_conversion_supported(activation_post_process): + # didn't find corresponding quantize op and info for the activation_post_process + # so we just remove the observer + with graph.inserting_before(node): + node.replace_all_uses_with(node.args[0]) + graph.erase_node(node) + return + + # otherwise, we can convert the activation_post_process module call to quantize/dequantize node + + # 1. extract the information from activation_post_process module for generating + # the quantize and dequantize operator + dtype = activation_post_process.dtype # type: ignore[attr-defined] + + is_dynamic = False + if hasattr(activation_post_process, "is_dynamic"): + is_dynamic = activation_post_process.is_dynamic # type: ignore[assignment] + + if dtype in SUPPORTED_QDTYPES and (not is_dynamic): + # TODO: probably should cleanup this condition check, it's hard + # to reason about this if and the following elif + + # uint8/int8/int32 static quantization branch + + # 1. extract information for inserting q/dq node from activation_post_process + node_type = "call_function" + quantize_op : Optional[Callable] = None + scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined, operator] + if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined] + ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined, arg-type] + quantize_op = torch.ops.quantized_decomposed.quantize_per_channel.default + dequantize_op = torch.ops.quantized_decomposed.dequantize_per_channel.default + quant_min = activation_post_process.quant_min + quant_max = activation_post_process.quant_max + dtype_ = to_underlying_dtype(dtype) + qparams = { + "_scale_": scale, + "_zero_point_": zero_point, + "_axis_": ch_axis, + "_quant_min_": quant_min, + "_quant_max_": quant_max, + "_dtype_": dtype_ + } + else: + quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.default + dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.default + scale = float(scale) + zero_point = int(zero_point) + quant_min = activation_post_process.quant_min # type: ignore[attr-defined] + quant_max = activation_post_process.quant_max # type: ignore[attr-defined] + dtype_ = to_underlying_dtype(dtype) + qparams = { + "_scale_": scale, + "_zero_point_": zero_point, + "_quant_min_": quant_min, + "_quant_max_": quant_max, + "_dtype_": dtype_ + } + + # 2. 
replace activation_post_process node with quantize and dequantize + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value_or_node in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + if key in ['_scale_', '_zero_point_'] and (not isinstance(value_or_node, (float, int))): + # For scale and zero_point values we register them as buffers in the root module. + # However, note that when the values are not tensors, as in the case of + # per_tensor quantization, they will be treated as literals. + # However, registering them as a node seems to cause issue with dynamo + # tracing where it may consider tensor overload as opposed to default. + # With extra check of scale and zero_point being scalar, it makes + # sure that the default overload can be used. + # TODO: maybe need more complex attr name here + qparam_node = create_getattr_from_value( + model, graph, module_path + prefix + key, value_or_node) + quantize_op_inputs.append(qparam_node) + else: + # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph. + quantize_op_inputs.append(value_or_node) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + # use the same qparams from quantize op + dq_inputs = [quantized_node] + quantize_op_inputs[1:] + dequantized_node = graph.call_function( + dequantize_op, + tuple(dq_inputs), + {} + ) + + def remap_fn(x): + return dequantized_node if x is node else x + + # remap numeric_debug_handle + for user_node in node.users: + if "numeric_debug_handle" in user_node.meta: + numeric_debug_handle = user_node.meta["numeric_debug_handle"] + user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()} + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif is_dynamic: + + # uint8/int8/fp16 dynamic quantization + + # 1. extract information for inserting q/dq node from activation_post_process + node_type = "call_function" + quantize_op = torch.ops.quantized_decomposed.quantize_per_tensor.tensor + # we only use choose_qparams for is_decomposed now, + # but we should probably align the non-decomposed path with this as well, + # and that can be done after we remove reduce_range flag + # 1. extract qparams from activation_post_process module + dtype_ = to_underlying_dtype(dtype) + assert dtype_ in [torch.uint8, torch.int8], \ + "only uint8 and int8 are supported in reference flow for " \ + "dynamic quantization right now" + quant_min = activation_post_process.quant_min # type: ignore[attr-defined] + quant_max = activation_post_process.quant_max # type: ignore[attr-defined] + qscheme = getattr(activation_post_process, "qscheme", torch.per_tensor_affine) # type: ignore[attr-defined] + eps = getattr(activation_post_process, "eps", torch.finfo(torch.float32).eps) # type: ignore[attr-defined] + # note: scale and zero_point are missing for quantize_per_tensor op + # we'll need to get this from choose_qparams op, which we'll add after + # this step + qparams = { + "_quant_min_": quant_min, + "_quant_max_": quant_max, + "_eps_": eps, + "_dtype_": dtype_ + } + + choose_qparams_op = _QSCHEME_TO_CHOOSE_QPARAMS_OP[qscheme] + # 2. 
insert choose_qparams op and update the qparams list + with graph.inserting_before(node): + input_node = node.args[0] + choose_qparams_op_inputs = [node.args[0]] + for key, value in qparams.items(): + # we have quant_min, quant_max and dtype, all should be stored + # as literals + choose_qparams_op_inputs.append(value) + choose_qparams_node = graph.create_node( + "call_function", + choose_qparams_op, + tuple(choose_qparams_op_inputs), + {} + ) + # choose_qparms returns (scale, zero_point) + scale_node = graph.create_node( + "call_function", + operator.getitem, + (choose_qparams_node, 0), + {} + ) + zero_point_node = graph.create_node( + "call_function", + operator.getitem, + (choose_qparams_node, 1), + {} + ) + quant_min = qparams["_quant_min_"] + quant_max = qparams["_quant_max_"] + dtype = qparams["_dtype_"] + qparams = { + "_scale_": scale_node, + "_zero_point_": zero_point_node, + "_quant_min_": quant_min, + "_quant_max_": quant_max, + "_dtype_": dtype + } + + # 3. replace activation_post_process node to quantize and dequantize node + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value_or_node in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + if key in ['_scale_', '_zero_point_']: + # in this case we have a node in the graph since it's dynamically + # computed from the input, with choose_qparams op + qparam_node = value_or_node + quantize_op_inputs.append(qparam_node) + else: + # for qparams that are not scale/zero_point (like axis, dtype) we + # store them as literals in the graph. + quantize_op_inputs.append(value_or_node) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + # use the same qparams from quantize op + dq_inputs = [quantized_node] + quantize_op_inputs[1:] + # need to use the tensor variant of this op, since scale and zero_point + # from choose_qparam are Tensors, instead of float/int, this is to + # prevent these nodes being traced away by downstream systems + dequantize_op = torch.ops.quantized_decomposed.dequantize_per_tensor.tensor + dequantized_node = graph.call_function( + dequantize_op, + tuple(dq_inputs), + {} + ) + + def remap_fn(x): + return dequantized_node if x is node else x + + # remap numeric_debug_handle + for user_node in node.users: + if "numeric_debug_handle" in user_node.meta: + numeric_debug_handle = user_node.meta["numeric_debug_handle"] + user_node.meta["numeric_debug_handle"] = {remap_fn(k): v for k, v in numeric_debug_handle.items()} + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif dtype == torch.float16: + raise NotImplementedError("decomposed to float16 op not implemented yet") + + # should not reach since we have checks in the beginning to make sure the + # activation_post_process is supported + +def _replace_observer_with_quantize_dequantize_node( + model: torch.fx.GraphModule, + node: Node, + modules: Dict[str, torch.nn.Module], + node_name_to_scope: Dict[str, Tuple[str, type]], + node_name_to_qconfig: Dict[str, QConfigAny]) -> None: + """ Replace activation_post_process module call node with quantize and + dequantize node + + Before: + ... -> observer_0(x) -> ... + After: + ... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ... 
+ """ + assert modules is not None + assert isinstance(node.target, str) + graph = model.graph + module_path, prefix = _get_module_path_and_prefix(node, node_name_to_scope, node_name_to_qconfig) + activation_post_process = modules[node.target] + # skip replacing observers to quant/dequant nodes if the qconfigs of all + # consumers and producers of this observer are None + skip_replacement = all(_has_none_qconfig(n, node_name_to_qconfig) for n in + list(node.args) + list(node.users.keys())) + if skip_replacement or not _is_conversion_supported(activation_post_process): + # didn't find corresponding quantize op and info for the activation_post_process + # so we just remove the observer + with graph.inserting_before(node): + node.replace_all_uses_with(node.args[0]) + graph.erase_node(node) + return + + # otherwise, we can convert the activation_post_process module call to quantize/dequantize node + dtype = activation_post_process.dtype # type: ignore[attr-defined] + + is_dynamic = False + if hasattr(activation_post_process, "is_dynamic"): + is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment] + + if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.float8_e5m2, torch.float8_e4m3fn] and \ + (not is_dynamic): + # TODO: probably should cleanup this condition check, it's hard + # to reason about this if and the following elif + + # uint8/int8/int32 static quantization branch + + # 1. extract the information from activation_post_process module for generating + # the quantize and dequantize operator + node_type = "call_function" + quantize_op : Optional[Callable] = None + scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined, operator] + if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined] + ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined, arg-type] + qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype} + quantize_op = torch.quantize_per_channel + else: + scale = float(scale) + zero_point = int(zero_point) + qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype} + quantize_op = torch.quantize_per_tensor + + # 2. replace activation_post_process node with quantize and dequantize + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value_or_node in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + if key in ['_scale_', '_zero_point_']: + # For scale and zero_point values we register them as buffers in the root module. + # TODO: maybe need more complex attr name here + qparam_node = create_getattr_from_value( + model, graph, module_path + prefix + key, value_or_node) + quantize_op_inputs.append(qparam_node) + else: + # for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph. 
+ quantize_op_inputs.append(value_or_node) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif is_dynamic: + + # uint8/int8/fp16 dynamic quantization branch + + node_type = "call_function" + quantize_op = torch.quantize_per_tensor_dynamic + # TODO: get reduce range from observer + # reduce_range = activation_post_process.reduce_range + reduce_range = torch.backends.quantized.engine in ("fbgemm", "x86") + qparams = {"_dtype_": dtype, "_reduce_range_": reduce_range} + + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value in qparams.items(): + quantize_op_inputs.append(value) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + elif dtype == torch.float16: + node_type = "call_method" + quantize_op = "to" # type: ignore[assignment] + qparams = {"_dtype_": dtype} + with graph.inserting_before(node): + input_node = node.args[0] + quantize_op_inputs = [input_node] + for key, value in qparams.items(): + # TODO: we can add the information of whether a value needs to + # be registered as an attribute in qparams dict itself + quantize_op_inputs.append(value) + + quantized_node = graph.create_node(node_type, quantize_op, tuple(quantize_op_inputs), {}) + dequantized_node = graph.call_method("dequantize", args=(quantized_node,)) + node.replace_all_uses_with(dequantized_node) + graph.erase_node(node) + + # should not reach since we have checks in the beginning to make sure the + # activation_post_process is supported + +# this is a temporary hack for custom module, we may want to implement +# this properly after the custom module class design is finalized +# TODO: DeQuantStubs are currently inserted only after custom module LSTM, while observers are inserted +# after all other custom modules. In the future, we should simply insert QuantStubs before and DeQuantStubs +# after custom modules in general, and replace these with "quantize" and "dequantize" nodes respectively. +def _replace_observer_or_dequant_stub_with_dequantize_node(node: Node, graph: Graph) -> None: + call_custom_module_node = node.args[0] + assert isinstance(call_custom_module_node, Node), \ + f"Expecting the for call custom module node to be a Node, but got {call_custom_module_node}" + node.replace_all_uses_with(call_custom_module_node) + graph.erase_node(node) + _insert_dequantize_node(call_custom_module_node, graph) + +def _is_conversion_supported(activation_post_process: torch.nn.Module) -> bool: + dtype = activation_post_process.dtype # type: ignore[attr-defined] + + is_dynamic = False + if hasattr(activation_post_process, "is_dynamic"): + is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment] + + return ( + (dtype in SUPPORTED_QDTYPES and (not is_dynamic)) or # type: ignore[return-value] + is_dynamic or + dtype == torch.float16 + ) + +def _has_none_qconfig(node: Argument, node_name_to_qconfig: Dict[str, QConfigAny]) -> bool: + """ Check if a node has a qconfig of None, i.e. 
user requested to not quantize + the node + """ + return isinstance(node, Node) and node.name in node_name_to_qconfig and node_name_to_qconfig[node.name] is None + +def _run_weight_observers(observed: GraphModule, backend_config: BackendConfig) -> None: + """ Extract the subgraph that produces the weight for dynamic quant + or weight only quant node and run the subgraph to observe the weight. + Note that the observers of dynamic quant or weight only quant ops are + run during the convert step. + """ + for node in observed.graph.nodes: + if node.op != "call_function": + continue + for node_arg in node.args: + # node_arg is weight + if node_arg and node_arg_is_weight(node, node_arg): + weight_observer_nodes = collect_producer_nodes(node_arg) + if weight_observer_nodes is None: + continue + weight_observer_module = \ + graph_module_from_producer_nodes( + observed, weight_observer_nodes) + # run the weight observer + weight_observer_module() + +def _maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph) -> None: + """ If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node, + we'll recursively remove the dequantize Node + """ + if isinstance(arg, Node) and \ + arg.op == "call_method" and \ + arg.target == "dequantize": + quantize_node = arg.args[0] + # we only replace the specific use since dequantize could be used by other nodes + # as well + node.replace_input_with(arg, quantize_node) + elif isinstance(arg, (list, tuple)): + for arg_element in arg: + _maybe_recursive_remove_dequantize(arg_element, node, graph) + elif isinstance(arg, dict): + for arg_element in arg.values(): + _maybe_recursive_remove_dequantize(arg_element, node, graph) + else: + warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}") + +def _get_module_path_and_prefix( + obs_node: Node, + node_name_to_scope: Dict[str, Tuple[str, type]], + node_name_to_qconfig: Dict[str, QConfigAny]) -> Tuple[str, str]: + """ Given and observer node, get the `Scope` or the fully qualified name for + the submodule containing the observed node, also return a prefix of "_input" + when the observed node is an input of a F.linear op, and not the output of another + quantized op. + TODO: this logic is hacky, we should think about how to remove it or make it more + general + """ + observed_node = obs_node.args[0] + # an observer can be inserted for both input of the next operator or output of the previous + # operator (they can be the same) + # this flag identifies if the observer is inserted only because the observed node is + # the input of the next operator + assert isinstance(observed_node, Node), \ + f"Expecting observed node to be a Node, but got {observed_node}" + is_input_observer_only = node_name_to_qconfig[observed_node.name] is None \ + if observed_node.name in node_name_to_qconfig else None + if is_input_observer_only: + # if the quantize function is at the input of op, then we find the first user of the observer_node + # to get the path. If a linear call_function is in the user list, we return the first instance + # of linear node to get the FQN. 
+ users = list(obs_node.users) + first_linear_use_or_first_use = users[0] if users else None + linear_node = None + for n in users: + if n.op == "call_function" and n.target == torch.nn.functional.linear: + linear_node = n + break + if linear_node: + first_linear_use_or_first_use = linear_node + prefix = "_input" + else: + # if the quantize function is at the output of the op, we use the observer input node to get the path + first_linear_use_or_first_use = observed_node + prefix = "" + + if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope: + module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name] + else: + # TODO: it's not used, so actually we can skip quantization + # but this requires changing return type of quantize_node + # we can fix it later if needed + module_path = "" + return module_path, prefix + +def _insert_dequantize_node( + node: Node, + graph: Graph) -> None: + """ Inserts dequantize node for `node` in `graph` + """ + with graph.inserting_after(node): + dequantize_node = graph.call_method("dequantize", (node,)) + for user_node in dict(node.users): + if user_node is not dequantize_node: + user_node.replace_input_with(node, dequantize_node) + +def _maybe_get_observer_for_node( + node: Node, + modules: Dict[str, torch.nn.Module] +) -> Optional[torch.nn.Module]: + """ + If the node is observed, return the observer + instance. Otherwise, return None. + """ + for maybe_obs_node in node.users.keys(): + if maybe_obs_node.op == 'call_module': + maybe_obs = modules[str(maybe_obs_node.target)] + if _is_activation_post_process(maybe_obs): + return maybe_obs + return None + +def convert_standalone_module( + node: Node, + modules: Dict[str, torch.nn.Module], + model: torch.fx.GraphModule, + is_reference: bool, + backend_config: Optional[BackendConfig]) -> None: + """ Converts an observed standalone module to a quantized standalone module by calling + the fx convert api, currently using the same `is_reference` flag as parent, but we may + change this behavior in the future (e.g.
separating quantization and lowering for + standalone module as well) + + Args: + - node: The call_module node of the observed standalone module + - modules: named_module of original model + - model: original model + - is_reference: a flag from parent provided by user to decide if we want to + produce a reference model or a fbgemm/qnnpack model + - backend_config: backend configuration of the target backend of quantization + """ + # TODO: remove is_reference flag + if is_reference: + convert_fn = torch.ao.quantization.quantize_fx.convert_to_reference_fx + else: + convert_fn = torch.ao.quantization.quantize_fx.convert_fx # type: ignore[attr-defined] + # We know that observed standalone module is a GraphModule since + # it's produced by us + observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment] + sm_input_quantized_idxs = \ + observed_standalone_module \ + .meta["_observed_graph_module_attrs"].standalone_module_input_quantized_idxs + # remove the dequantize nodes for inputs + args = list(node.args) + for idx in range(len(args)): + if idx in sm_input_quantized_idxs: + arg = args[idx] + if arg.op == "call_method" and arg.target == "dequantize": # type: ignore[union-attr] + quantize_node = arg.args[0] # type: ignore[union-attr] + node.replace_input_with(arg, quantize_node) + if len(arg.users) == 0: # type: ignore[union-attr] + model.graph.erase_node(arg) + # add dequantize node for output + sm_output_quantized_idxs = \ + observed_standalone_module \ + .meta["_observed_graph_module_attrs"].standalone_module_output_quantized_idxs + if len(sm_output_quantized_idxs) > 0: + assert sm_output_quantized_idxs[0] == 0, "Currently only quantized" + "output idxs = [0] is supported" + + # if it's non-empty, then it means the output is kept in quantized form + # we'll just add a dequantize node after this node + _insert_dequantize_node(node, model.graph) + + # TODO: allow convert_custom_config to override backend_config + # for standalone module + quantized_standalone_module = convert_fn( + observed_standalone_module, + backend_config=backend_config) + parent_name, name = _parent_name(node.target) + # update the modules dict + setattr(modules[parent_name], name, quantized_standalone_module) + modules[str(node.target)] = quantized_standalone_module + +def convert_weighted_module( + node: Node, + modules: Dict[str, torch.nn.Module], + observed_node_names: Set[str], + node_name_to_qconfig: Dict[str, QConfigAny], + backend_config: BackendConfig, + is_decomposed: bool = False, + is_reference: bool = False, +) -> None: + """ Convert a weighted module to reference quantized module in the model + If the QConfig of a QAT module is not set, the module will still be converted to + a float module. 
+ + Args: + - node: The call_module node of the observed standalone module + - modules: named_module of original model + - observed_node_names: names for the set of observed fx node, we can skip + this conversion if the node is not observed + """ + original_module = modules[str(node.target)] + qconfig: QConfigAny = original_module.qconfig # type: ignore[assignment] + weight_post_process = None + qat_module_classes = get_qat_module_classes(backend_config) + + if isinstance( + original_module, + qat_module_classes): + # Converting qat module to a float module, we need to attach + # weight fake_quant to the module, weight fake_quant is assumed to be run during + # QAT so we don't need to run it again here + weight_post_process = original_module.weight_fake_quant + original_module = original_module.to_float() # type: ignore[operator] + # change qat module to float module + parent_name, name = _parent_name(node.target) + setattr(modules[parent_name], name, original_module) + + is_observed = node.name in observed_node_names + # If a qconfig is not defined for this node, then skip converting to a reference module + if qconfig is None or _has_none_qconfig(node, node_name_to_qconfig) or not is_observed: + return + + # skip converting to reference quantized module if the qconfig is not supported + pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config) + dtype_configs = pattern_to_dtype_configs.get(type(original_module), []) + if not _is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs): + return + + # TODO: rename weight_is_statically_quantized to weight_is_int8_quantized + is_weight_quantized = weight_is_quantized(qconfig) + + # the condition for swapping the module to reference quantized module is: + # weights need to be quantized + if not is_weight_quantized: + return + + fused_module = None + float_module = original_module + # extract the individual float_module and fused module + if isinstance(original_module, torch.ao.nn.intrinsic._FusedModule): + fused_module = float_module + float_module = fused_module[0] # type: ignore[index] + + # TODO: move this to the reference quantized module + # weight_qparams or weight_qparams dict + wq_or_wq_dict = {"is_decomposed": is_decomposed} + if isinstance(float_module, torch.nn.RNNCellBase): + weight_post_process_ih = qconfig.weight() # type: ignore[union-attr, operator] + weight_post_process_hh = qconfig.weight() # type: ignore[union-attr, operator] + weight_post_process_ih(float_module.weight_ih) + weight_post_process_hh(float_module.weight_hh) + weight_qparams_ih = get_qparam_dict(weight_post_process_ih) + weight_qparams_hh = get_qparam_dict(weight_post_process_hh) + wq_or_wq_dict.update({ + "weight_ih": weight_qparams_ih, + "weight_hh": weight_qparams_hh, + }) + elif isinstance(float_module, (torch.nn.LSTM, torch.nn.GRU)): + # format for wq_or_wq_dict (flattened attributes): + # {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...} + for wn in float_module._flat_weights_names: + if hasattr(float_module, wn) and wn.startswith("weight"): + weight = getattr(float_module, wn) + weight_post_process = qconfig.weight() # type: ignore[union-attr, operator] + if weight_post_process.dtype == torch.qint8: # type: ignore[union-attr] + weight_post_process(weight) # type: ignore[operator, misc] + wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process) + else: + # weight_post_process is None means the original module is not a QAT module + # we need to get weight_post_process from qconfig in this case + is_ptq = weight_post_process is 
None + if is_ptq: + weight_post_process = qconfig.weight() # type: ignore[union-attr, operator] + device = assert_and_get_unique_device(float_module) + if device: + weight_post_process.to(device) + + # Call weight observer/fake_quant at least once to ensure the scales and zero points + # have the right shapes. Note: there are two cases where we don't have to do this: + # + # (1) QAT: The model's forward method already calls the weight observer/fake_quant, + # and this typically happens during training, so we don't need to do it here. + # + # (2) Non-reference (lowered) case: The quantized module's from_float method already + # calls the weight observer/fake_quant, so we don't have to do it here. + # + # Currently we ignore both cases and call the weight observer/fake_quant here + # regardless, which is technically incorrect. For (1), this is mainly to preserve BC + # in test code, which may not always train before convert. In the future, we should + # break BC for these two cases. See https://github.com/pytorch/pytorch/issues/73941. + # + # For PT2, however, we don't need to preserve BC here, so we can skip this hack + # for QAT. We identify this case as (is_decomposed + is_reference + is_qat). + # Note that we still need it for PTQ in the PT2 flow since the model's forward + # method doesn't call the weight observer. + is_qat = not is_ptq + if not (is_decomposed and is_reference and is_qat): + weight_post_process(float_module.weight) # type: ignore[operator] + + wq_or_wq_dict.update(get_qparam_dict(weight_post_process)) + + # We use the same reference module for all modes of quantization: static, dynamic, weight_only + # root_module_to_quantized_reference_module: module mapping from root (floating point) module class + # to quantized reference module class, e.g. 
nn.Conv2d to nn.quantized._reference.Conv2d + root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config) + ref_qmodule_cls = root_module_to_quantized_reference_module.get(type_before_parametrizations(float_module), None) + assert ( + ref_qmodule_cls is not None + ), f"No reference quantized module class configured for {type_before_parametrizations(float_module)}" + ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict) # type: ignore[attr-defined] + if fused_module is not None: + fused_module[0] = ref_qmodule # type: ignore[operator] + else: + parent_name, name = _parent_name(node.target) + setattr(modules[parent_name], name, ref_qmodule) + +def _remove_previous_dequantize_in_custom_module(node: Node, prev_node: Node, graph: Graph) -> None: + """ + Given a custom module `node`, if the previous node is a dequantize, reroute the custom as follows: + + Before: quantize - dequantize - custom_module + After: quantize - custom_module + \\ - dequantize + """ + # expecting the input node for a custom module node to be a Node + assert isinstance(prev_node, Node), \ + f"Expecting the argument for custom module node to be a Node, but got {prev_node}" + if prev_node.op == "call_method" and prev_node.target == "dequantize": + node.replace_input_with(prev_node, prev_node.args[0]) + # Remove the dequantize node if it doesn't have other users + if len(prev_node.users) == 0: + graph.erase_node(prev_node) + +def convert_custom_module( + node: Node, + graph: Graph, + modules: Dict[str, torch.nn.Module], + custom_module_class_mapping: Dict[QuantType, Dict[Type, Type]], + statically_quantized_custom_module_nodes: Set[Node]) -> None: + """ Converts an observed custom module to a quantized custom module based on + `custom_module_class_mapping` + For static quantization, we'll also remove the previous `dequantize` node and + attach the observer node for output to the module, the observer for the node + will be converted to a dequantize node instead of quantize-dequantize pairs + later in the graph. In the end we would have a quantized custom module that + has the same interface as a default quantized module in nn.quantized namespace, + i.e. quantized input and quantized output. + + Args: + - node: The call_module node of the observed standalone module + - graph: The graph containing the node + - modules: named_module of original model + - custom_module_class_mapping: mapping from observed custom module class to + quantized custom module class, used to swap custom modules + - statically_quantized_custom_module_nodes: we'll add the custom module node + if we find it is statically quantized, this will be used later when converting + observers to quant/dequant node pairs, if the observed node is a statically + quantized custom module nodes, we'll convert the observer to a dequantize node, + this is to keep the interface the same as the default quantized module. + TODO: maybe we want to redesign this part to align with reference model design + as well, but there has been some discussions around the interface, so we can do + it later. 
+ """ + observed_custom_module = modules[str(node.target)] + maybe_obs = _maybe_get_observer_for_node(node, modules) + qconfig = observed_custom_module.qconfig + if activation_is_statically_quantized(qconfig): + statically_quantized_custom_module_nodes.add(node) + if _is_custom_module_lstm(node, modules): + # The inputs are tuples in the form (input, (hidden0, hidden1)) + # Ensure all three input nodes are quantized + assert ( + len(node.args) == 2 and + isinstance(node.args[1], tuple) and + len(node.args[1]) == 2 + ) + (inputs, (hidden0, hidden1)) = node.args # type: ignore[misc] + assert isinstance(inputs, Node) + assert isinstance(hidden0, Node) + assert isinstance(hidden1, Node) + _remove_previous_dequantize_in_custom_module(node, inputs, graph) + _remove_previous_dequantize_in_custom_module(node, hidden0, graph) + _remove_previous_dequantize_in_custom_module(node, hidden1, graph) + elif _is_custom_module_mha(node, modules): + # Inputs are in the form (query, key, value) + # TODO: This is the first step in enabling the full fx custom module + # quantization path for MultiheadAttention, and only covers the inputs + # to the module. + # Additional handling is yet to be implemented for the outputs, similar + # to LSTM custom module + assert len(node.args) == 3 + query, key, value = node.args + assert isinstance(query, Node) + assert isinstance(key, Node) + assert isinstance(value, Node) + _remove_previous_dequantize_in_custom_module(node, query, graph) + _remove_previous_dequantize_in_custom_module(node, key, graph) + _remove_previous_dequantize_in_custom_module(node, value, graph) + else: + # remove the previous dequant node to ensure the inputs are quantized + arg = node.args[0] + assert isinstance(arg, Node) + _remove_previous_dequantize_in_custom_module(node, arg, graph) + # absorb the following observer into the module conversion + activation_post_process = _maybe_get_observer_for_node(node, modules) + assert activation_post_process is not None + observed_custom_module.activation_post_process = activation_post_process + + # swap the observed custom module to quantized custom module + quantized_custom_module_class = get_swapped_custom_module_class( + observed_custom_module, custom_module_class_mapping, qconfig) + quantized_custom_module = \ + quantized_custom_module_class.from_observed(observed_custom_module) + parent_name, name = _parent_name(node.target) + setattr(modules[parent_name], name, quantized_custom_module) + +def convert( + model: GraphModule, is_reference: bool = False, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, + is_standalone_module: bool = False, + _remove_qconfig_flag: bool = True, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, + is_decomposed: bool = False) -> GraphModule: + """ + We will convert an observed model (a module with observer calls) to a reference + quantized model, the rule is simple: + 1. for each observer module call in the graph, we'll convert it to calls to + quantize and dequantize functions based on the observer instance + 2. 
for weighted operations like linear/conv, we need to convert them to reference + quantized module, this requires us to know whether the dtype configured for the + weight is supported in the backend, this is done in prepare step and the result + is stored in observed_node_names, we can decide whether we need to swap the + module based on this set + + Args: + * `is_standalone_module`: when this flag is True, it means we are quantizing + a submodule that is not inlined in parent module, and will be quantized + separately as one unit. + + * `is_decomposed`: a boolean flag to indicate whether we want to use the + quantize operator for decomposed quantized tensor + (torch.ops.quantized_decomposed.quantize_per_tensor) or default/standalone + quantized tensor (torch.quantize_per_tensor) + + Returns: + a quantized standalone module, whether input/output is quantized is + specified by prepare_custom_config, with + input_quantized_idxs, output_quantized_idxs, please + see docs for :func:`~torch.ao.quantization.prepare_fx` for details + """ + if convert_custom_config is None: + convert_custom_config = ConvertCustomConfig() + + if isinstance(convert_custom_config, dict): + warnings.warn( + "Passing a convert_custom_config_dict to convert is deprecated and will not be supported " + "in a future version. Please pass in a ConvertCustomConfig instead.", + FutureWarning, + stacklevel=2, + ) + convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config) + + if isinstance(qconfig_mapping, dict): + warnings.warn( + "Passing a QConfig dictionary to convert is deprecated and will not be supported " + "in a future version. Please pass in a QConfigMapping instead.", + FutureWarning, + stacklevel=2, + ) + qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None + qconfig_mapping = copy.deepcopy(qconfig_mapping) + assert qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping) + + if isinstance(backend_config, dict): + warnings.warn( + "Passing a backend_config_dict to prepare is deprecated and will not be supported " + "in a future version. Please pass in a BackendConfig instead.", + FutureWarning, + stacklevel=2, + ) + backend_config = BackendConfig.from_dict(backend_config) + + if backend_config is None: + backend_config = get_native_backend_config() + + assert _is_observed_module(model), \ + 'incoming model must be produced by prepare_fx' + observed_graph_module_attrs = model.meta["_observed_graph_module_attrs"] + node_name_to_scope: Dict[str, Tuple[str, type]] = observed_graph_module_attrs.node_name_to_scope + prepare_custom_config: PrepareCustomConfig = observed_graph_module_attrs.prepare_custom_config + observed_node_names: Set[str] = observed_graph_module_attrs.observed_node_names + node_name_to_qconfig: Dict[str, QConfigAny] = observed_graph_module_attrs.node_name_to_qconfig # type: ignore[assignment] + + # mapping from fully qualified module name to module instance + # for example, + # { + # '': Model(...), + # 'linear': Linear(...), + # 'linear.weight_fake_quant': PerChannelMinMaxObserver(...), + # } + # We use remove_duplicate=False here because torch.cat uses + # the same activation_post_process module instance but different names + modules = dict(model.named_modules(remove_duplicate=False)) + + # TODO refactor this code once we update the prepare logic to have additional information on + # which graph nodes have been observed and share that with convert to decide which observers to ignore. 
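+ # A qconfig_mapping passed at convert time must agree with the prepare-time mapping;
+ # entries may only be changed to None, which skips quantizing the matching nodes.
+ # Illustrative sketch (variable names are hypothetical, not from this file):
+ #   convert_qconfig_mapping = copy.deepcopy(prepare_qconfig_mapping)
+ #   convert_qconfig_mapping.set_object_type(torch.nn.Linear, None)  # leave Linears float
+ #   quantized = convert_fx(prepared_model, qconfig_mapping=convert_qconfig_mapping)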
+ if qconfig_mapping: + prepare_qconfig_mapping: QConfigMapping = observed_graph_module_attrs.qconfig_mapping # type: ignore[assignment] + modules_copy = copy.deepcopy(modules) + + if observed_graph_module_attrs.is_qat: + _update_qconfig_for_qat(qconfig_mapping, backend_config) + _update_qconfig_for_fusion(model, qconfig_mapping) + + _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping, qconfig_mapping) # type: ignore[arg-type] + convert_node_name_to_qconfig = _generate_node_name_to_qconfig( + model, modules_copy, model.graph, qconfig_mapping, node_name_to_scope) + # check the convert_node_name_to_qconfig generated and ensure that + # all the values either match what was set in prepare node_name_to_qconfig + # or are set to None in the convert_node_name_to_qconfig. + for k, v in node_name_to_qconfig.items(): + assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig' + if convert_node_name_to_qconfig[k] is not None: + assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \ + f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \ + f"but {v} was updated to {convert_node_name_to_qconfig[k]}" + node_name_to_qconfig = convert_node_name_to_qconfig + + custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping) + custom_module_class_mapping = convert_custom_config.observed_to_quantized_mapping + + if observed_graph_module_attrs.equalization_node_name_to_qconfig is not None: + # If we want to do equalization then do the following: + # Calculate the equalization scale, update the observers with the scaled + # inputs, and scale the weight + weight_eq_obs_dict = update_obs_for_equalization(model, modules) + convert_eq_obs(model, modules, weight_eq_obs_dict) + + # always run weight observers in the top level forward method + # for dynamic quant ops or weight only quant ops + _run_weight_observers(model, backend_config) + + graph_inputs: List[str] = [] + for node in model.graph.nodes: + if node.op == 'placeholder': + graph_inputs.append(node.name) + + # additional state to override inputs to be quantized, if specified + # by the user + placeholder_node_seen_cnt = 0 + input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes + output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes + + root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config) + # convert tuples so that it can work with isinstance(module, tuple_of_classes) + root_module_classes = tuple(root_module_to_quantized_reference_module.keys()) + qat_module_classes = get_qat_module_classes(backend_config) + fused_module_classes = get_fused_module_classes(backend_config) + statically_quantized_custom_module_nodes: Set[Node] = set() + + for node in list(model.graph.nodes): + if node.op == 'placeholder': + cur_placeholder_node_idx = placeholder_node_seen_cnt + placeholder_node_seen_cnt += 1 + if cur_placeholder_node_idx in input_quantized_idxs: + # Inputs are assumed to be quantized if the user specified the + # input_quantized_idxs override. + # we need to dequantize the inputs since all operators took + # floating point inputs in reference quantized models + _insert_dequantize_node(node, model.graph) + elif node.op == "output": + # If the argument is empty we don't need to do anything + if len(output_quantized_idxs) == 0: + continue + # Result are kept quantized if the user specified the + # output_quantized_idxs override. 
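+ # e.g. with a tuple output and output_quantized_idxs=[0], only the first output
+ # element keeps its quantized form: its trailing dequantize is removed below.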
+ # Remove the dequantize operator for the node in the end if any + return_node = node + output = node.args[0] + # outputs can be Node, list, tuple, dict, other cases are not supported yet + if isinstance(output, (list, tuple)): + for idx in output_quantized_idxs: + _maybe_recursive_remove_dequantize(output[idx], return_node, model.graph) + elif isinstance(output, (Node, dict)): + # we treat dict as a single argument currently, but it can be extended + # to support {"key": dtype} after we change output_quantized_idxs to + # dict + if 0 in output_quantized_idxs: + _maybe_recursive_remove_dequantize(output, return_node, model.graph) + else: + warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}") + elif node.op == "call_module": + mod = _get_module(node, modules) + assert mod is not None + if _is_activation_post_process(mod): + observed_node = node.args[0] + if observed_node in statically_quantized_custom_module_nodes: + _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph) + else: + if is_decomposed: + _replace_observer_with_quantize_dequantize_node_decomposed( + model, node, modules, node_name_to_scope, + node_name_to_qconfig) + else: + _replace_observer_with_quantize_dequantize_node( + model, node, modules, node_name_to_scope, + node_name_to_qconfig) + elif isinstance(mod, DeQuantStub): + _replace_observer_or_dequant_stub_with_dequantize_node(node, model.graph) + elif _is_observed_standalone_module(mod): + convert_standalone_module( + node, modules, model, is_reference, backend_config) + # below this point `type_before_parametrizations` is used + # instead of `type` to handle situations with fx quant + sparsity + elif type_before_parametrizations(mod) in set( + root_module_classes).union(qat_module_classes).union(fused_module_classes): + # extra check for fused module classes to make sure they are fused module classes + # of target modules + if type_before_parametrizations(mod) in fused_module_classes and \ + type_before_parametrizations(mod[0]) not in root_module_classes: # type: ignore[index] + continue + convert_weighted_module( + node, modules, observed_node_names, node_name_to_qconfig, backend_config, + is_decomposed, is_reference) + elif type_before_parametrizations(mod) in custom_module_classes: + convert_custom_module( + node, model.graph, modules, custom_module_class_mapping, + statically_quantized_custom_module_nodes) + + # remove deadcode after converting observers to quant/dequant ops + model.graph.eliminate_dead_code() + model = GraphModule(model, model.graph) + + # TODO: maybe move this to quantize_fx.py + if not is_reference: + model = lower_to_fbgemm(model, node_name_to_qconfig, node_name_to_scope) + + # TODO: this looks hacky, we want to check why we need this and see if we can + # remove this + # removes qconfig and activation_post_process modules + if _remove_qconfig_flag: + _remove_qconfig(model) + model.delete_all_unused_submodules() + model.meta.pop("_observed_graph_module_attrs", None) + return model diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..b555789f673a29ad609a8902f87f059204818bc1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/fuse.py @@ -0,0 +1,168 @@ +# mypy: allow-untyped-defs +from torch.fx import ( + GraphModule, + Node, + map_arg +) +from torch.fx.graph import Graph +from .match_utils import ( + 
_is_match, + MatchAllNode, +) +from .pattern_utils import ( + _sorted_patterns_dict, +) + +from ..backend_config import ( + BackendConfig, + get_native_backend_config, +) +from ..backend_config.utils import ( + get_fuser_method_mapping, + get_fusion_pattern_to_root_node_getter, + get_fusion_pattern_to_extra_inputs_getter, +) + +from .custom_config import FuseCustomConfig + +from .fuse_handler import ( + _get_fusion_pattern_to_fuse_handler_cls, + FuseHandler, +) + +from typing import Any, Callable, Dict, List, Tuple, Union +import warnings + +from torch.ao.quantization.utils import Pattern, NodePattern + + +__all__ = [ + "fuse", + # TODO: We should make this private in the future + # This is currently needed for test_public_bindings for some reason + "FuseHandler", +] + + +def fuse( + model: GraphModule, + is_qat: bool, + fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + if fuse_custom_config is None: + fuse_custom_config = FuseCustomConfig() + + if isinstance(fuse_custom_config, dict): + warnings.warn( + "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported " + "in a future version. Please pass in a FuseCustomConfig instead.", + FutureWarning, + stacklevel=2, + ) + fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config) + + if isinstance(backend_config, dict): + warnings.warn( + "Passing a backend_config_dict to prepare is deprecated and will not be supported " + "in a future version. Please pass in a BackendConfig instead.", + FutureWarning, + stacklevel=2, + ) + backend_config = BackendConfig.from_dict(backend_config) + + named_modules = dict(model.named_modules()) + + if backend_config is None: + backend_config = get_native_backend_config() + + fusion_pattern_to_fuse_handler_cls = _sorted_patterns_dict(_get_fusion_pattern_to_fuse_handler_cls(backend_config)) + fuser_method_mapping = get_fuser_method_mapping(backend_config) + fusion_pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config) + fusion_pattern_to_extra_inputs_getter = get_fusion_pattern_to_extra_inputs_getter(backend_config) + + # find fusion + fusion_pairs = _find_matches( + model, model.graph, fusion_pattern_to_fuse_handler_cls) + # TODO: change this to inplace changes to graph, since we no longer construct + # new GraphModule anymore + fused_graph = Graph() + env: Dict[Any, Any] = {} + + def load_arg(a): + return map_arg(a, lambda node: env[node.name]) + + def default_root_node_getter(node_pattern): + while not isinstance(node_pattern[-1], Node): + node_pattern = node_pattern[-1] + return node_pattern[-1] + + for node in model.graph.nodes: + maybe_last_node, pattern, matched_node_pattern, obj, node_to_subpattern = \ + fusion_pairs.get(node.name, (None, None, None, None, None)) + # get the corresponding subpattern for the current node + if node_to_subpattern is not None: + node_subpattern = node_to_subpattern.get(node, None) + else: + node_subpattern = None + if maybe_last_node is node: + assert obj is not None + root_node_getter = fusion_pattern_to_root_node_getter.get(pattern, default_root_node_getter) + root_node = root_node_getter(matched_node_pattern) # type: ignore[index] + extra_inputs_getter = fusion_pattern_to_extra_inputs_getter.get(pattern, None) + extra_inputs = [] + if extra_inputs_getter is not None: + extra_inputs = extra_inputs_getter(matched_node_pattern) + # TODO: add validation that root_node is a module and has the same type 
+ # as the root_module in the configuration + env[node.name] = obj.fuse( + load_arg, named_modules, fused_graph, root_node, extra_inputs, matched_node_pattern, # type: ignore[arg-type] + fuse_custom_config, fuser_method_mapping, is_qat) + elif maybe_last_node is None or node_subpattern is MatchAllNode: + env[node.name] = fused_graph.node_copy(node, load_arg) + # node matched in patterns and is not root is removed here + + model = GraphModule(model, fused_graph) + return model + +def _find_matches( + root: GraphModule, + graph: Graph, + pattern_to_fuse_handler_cls: Dict[Pattern, Callable], +) -> Dict[str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]]: + modules = dict(root.named_modules()) + # node name -> (root_node, match_value) + match_map : Dict[ + str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]] = {} + # a map from node to the matched subpattern + node_to_subpattern: Dict[Node, Any] = {} + + # TODO: dedup with quantization matching function in match_utils.py + def apply_match(pattern, node, match, matched_node_pattern, node_to_subpattern): + if isinstance(pattern, tuple): + s, *args = pattern + current_node_pattern: List[Node] = [] + apply_match(s, node, match, current_node_pattern, node_to_subpattern) + for subpattern, arg in zip(args, node.args): + apply_match(subpattern, arg, match, current_node_pattern, node_to_subpattern) + matched_node_pattern.append(tuple(current_node_pattern)) + else: + # the first pattern matches will take precedence + if node.name not in match_map: + matched_node_pattern.append(node) + # MatchAllNode here is actually MatchAllInputNode which should not + # be added to match_map + if pattern is not MatchAllNode: + node_to_subpattern[node] = pattern + root_node, pattern, handler = match + match_map[node.name] = (root_node, pattern, matched_node_pattern, handler, node_to_subpattern) + + for node in reversed(graph.nodes): + if node.name not in match_map: + for pattern, fuse_handler_cls in pattern_to_fuse_handler_cls.items(): + matched_node_pattern: List[Node] = [] + if _is_match(modules, node, pattern): + apply_match(pattern, node, (node, pattern, fuse_handler_cls(node)), matched_node_pattern, node_to_subpattern) + break + + return match_map diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py new file mode 100644 index 0000000000000000000000000000000000000000..224f71745157c70bf66bc407a004845771c786f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/graph_module.py @@ -0,0 +1,120 @@ +# mypy: allow-untyped-defs +import torch +import copy +from torch.fx import GraphModule +from torch.fx.graph import Graph +from typing import Union, Dict, Any, Set + +__all__ = [ + "FusedGraphModule", + "ObservedGraphModule", + "ObservedStandaloneGraphModule", + "QuantizedGraphModule", +] + +class FusedGraphModule(GraphModule): + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = preserved_attr_names + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + + # GraphModule does not copy attributes which are not in the __dict__ + # of vanilla nn.Module. So, we override __deepcopy__ in order + # to copy the quantization specific attributes correctly. 
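+ # The override deep-copies self.__dict__ onto a throwaway nn.Module and rebuilds the
+ # graph module from it, so the preserved quantization attributes survive the copy.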
+ def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return FusedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +class ObservedGraphModule(GraphModule): + + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = { + '_activation_post_process_map', + '_activation_post_process_indexes', + '_patterns', + '_node_name_to_qconfig', + '_prepare_custom_config', + '_equalization_node_name_to_qconfig', + '_node_name_to_scope', + '_qconfig_mapping', + '_is_qat', + '_observed_node_names'}.union(preserved_attr_names) + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + + # GraphModule does not copy attributes which are not in the __dict__ + # of vanilla nn.Module. So, we override __deepcopy__ in order + # to copy the quantization specific attributes correctly. + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return ObservedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +def _is_observed_module(module: Any) -> bool: + return hasattr(module, "meta") and "_observed_graph_module_attrs" in module.meta + +def _get_observed_graph_module_attr(model: Union[torch.nn.Module, GraphModule], attr_name: str) -> Any: + if hasattr(model, "meta") and "_observed_graph_module_attrs" in model.meta: # type: ignore[operator, index] + return getattr(model.meta["_observed_graph_module_attrs"], attr_name) # type: ignore[index] + return None + +class ObservedStandaloneGraphModule(ObservedGraphModule): + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + preserved_attr_names = preserved_attr_names.union({ + "_standalone_module_input_quantized_idxs", + "_standalone_module_output_quantized_idxs"}) + super().__init__(root, graph, preserved_attr_names) + + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return ObservedStandaloneGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) + +def _is_observed_standalone_module(module: Any) -> bool: + return _is_observed_module(module) and module.meta["_observed_graph_module_attrs"].is_observed_standalone_module + +def _save_packed_weight(self, destination, prefix, keep_vars): + for attr_name in dir(self): + if "_packed_weight" in attr_name and \ + isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined] + packed_weight = getattr(self, attr_name) + destination[prefix + attr_name] = packed_weight + +class QuantizedGraphModule(GraphModule): + """ This class is created to make sure PackedParams + (e.g. 
LinearPackedParams, Conv2dPackedParams) to appear in state_dict + so that we can serialize and deserialize quantized graph module with + torch.save(m.state_dict()) and m.load_state_dict(state_dict) + """ + def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]): + self.preserved_attr_names = preserved_attr_names + preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)} + super().__init__(root, graph) + for attr in preserved_attrs: + setattr(self, attr, preserved_attrs[attr]) + self._register_state_dict_hook(_save_packed_weight) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + attrs_to_pop = [] + for attr_name in state_dict: + if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950 + setattr(self, attr_name, state_dict[attr_name]) + attrs_to_pop.append(attr_name) + + # pop the packed param attributesn + for attr_name in attrs_to_pop: + state_dict.pop(attr_name) + + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + + + def __deepcopy__(self, memo): + fake_mod = torch.nn.Module() + fake_mod.__dict__ = copy.deepcopy(self.__dict__) + return QuantizedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names)) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py new file mode 100644 index 0000000000000000000000000000000000000000..ef58652b1adda0dc135fbef21afe789d6f538eda --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/lower_to_fbgemm.py @@ -0,0 +1,16 @@ +from ._lower_to_native_backend import _lower_to_native_backend +from ..qconfig import QConfigAny +from torch.fx import GraphModule +from typing import Dict, Tuple + +__all__ = ['lower_to_fbgemm'] + +def lower_to_fbgemm( + model: GraphModule, + qconfig_map: Dict[str, QConfigAny], + node_name_to_scope: Dict[str, Tuple[str, type]] +) -> GraphModule: + """ Lower a quantized reference model (with reference quantized operator patterns) + to fbgemm + """ + return _lower_to_native_backend(model, qconfig_map, node_name_to_scope) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b5a6657103fc7513effe59b35a61abc0d01856fa --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/match_utils.py @@ -0,0 +1,238 @@ +# mypy: allow-untyped-defs +import sys +import torch +from torch.fx.graph import ( + Graph, + Node, +) +from torch.ao.quantization.utils import Pattern +from .quantize_handler import ( + QuantizeHandler, +) +from ..qconfig import ( + QConfigAny, +) +from ..utils import ( + MatchAllNode +) +from .graph_module import ( + _is_observed_standalone_module, +) +from torch.nn.utils.parametrize import type_before_parametrizations +from typing import Any, Dict, List, Callable, Optional, Tuple, Type, Set, Iterable + + +__all__: List[str] = [] + +# TODO(future PR): the 1st argument is typed as `List[Node]`, but a better type +# would be a recursive `List[Union[Node, Tuple[Union[Node, ...]]]]` +_MatchResult = Tuple[Node, List[Node], 
Optional[Pattern], QuantizeHandler] + +_MatchResultWithQConfig = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler, + QConfigAny] + +# Note: The order of patterns is important! match function will take whatever is matched first, so we'll +# need to put the fusion patterns before single patterns. For example, add_relu should be registered come before relu. +# decorators are applied in the reverse order we see. Also when we match the nodes in the graph with these patterns, +# we'll start from the last node of the graph and traverse back. +def _is_match(modules, node, pattern, max_uses=sys.maxsize): + """ Matches a node in fx against a pattern + """ + if isinstance(pattern, tuple): + self_match, *arg_matches = pattern + if self_match is getattr: + assert len(pattern) == 2, 'Expecting getattr pattern to have two elements' + arg_matches = [] + else: + self_match = pattern + arg_matches = [] + + if isinstance(self_match, type) and issubclass(self_match, MatchAllNode): + return True + + if node == pattern: + return True + + if not isinstance(node, Node) or len(node.users) > max_uses: + return False + + if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module): + if node.op != 'call_module': + return False + if not type_before_parametrizations(modules[node.target]) == self_match: + return False + elif callable(self_match): + if node.op != 'call_function' or node.target is not self_match: + return False + elif node.target is getattr: + if node.args[1] != pattern[1]: + return False + elif isinstance(self_match, str): + if node.op != 'call_method' or node.target != self_match: + return False + elif node.target != self_match: + return False + + if not arg_matches: + return True + + if len(arg_matches) != len(node.args): + return False + + return all(_is_match(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches)) + +def _find_matches( + graph: Graph, + modules: Dict[str, torch.nn.Module], + patterns: Dict[Pattern, QuantizeHandler], + root_node_getter_mapping: Dict[Pattern, Callable], + standalone_module_names: Optional[List[str]] = None, + standalone_module_classes: Optional[List[Type]] = None, + custom_module_classes: Optional[List[Any]] = None) -> Dict[str, _MatchResult]: + """ + Matches the nodes in the input graph to quantization patterns, and + outputs the information needed to quantize them in future steps. + + Inputs: + - graph: an fx.Graph object + - modules: a mapping of fully qualified module name to instance, + for example, {'foo': ModuleFoo, ...} + - patterns: a mapping from a tuple of nodes in reverse order to + uninitialized QuantizeHandler subclass. + + Outputs a map of + node_name -> + (node, matched_values, matched_pattern, QuantizeHandler instance, + qconfig) + + For example, { + 'relu_1': (relu_1, [relu_1], torch.nn.functional.relu, + , QConfig(...)), + ... 
+ } + """ + if custom_module_classes is None: + custom_module_classes = [] + + if standalone_module_classes is None: + standalone_module_classes = [] + + if standalone_module_names is None: + standalone_module_names = [] + + match_map: Dict[str, _MatchResult] = {} + all_matched : Set[str] = set() + + def _recursive_record_node_in_match_map( + last_node, + match_map, + node_pattern, + matched_node_pattern, + pattern, + match_value): + if isinstance(node_pattern, Node): + match_map[node_pattern.name] = ( + last_node, matched_node_pattern, pattern, match_value) + elif not isinstance(node_pattern, Iterable): + return + else: + for n in node_pattern: + _recursive_record_node_in_match_map(last_node, match_map, n, matched_node_pattern, pattern, match_value) + + # TODO: 1. merge with fuse matcher 2. document the code + def record_match( + pattern, + node, + last_node, + matched_node_pattern, + match_map): + if isinstance(pattern, tuple): + s, *args = pattern + is_single_arg = len(args) == 1 + current_node_pattern: List[Node] = [] + record_match( + s, + node, + last_node, + matched_node_pattern, + match_map) + if pattern[0] is not getattr: + for subpattern, arg in zip(args, node.args): + record_match( + subpattern, + arg, + node, + current_node_pattern, + match_map) + if len(current_node_pattern) > 1: + # current_node_pattern is the node pattern we get from matching + # the subpattern with arguments of the node + # we use is_single_arg to recover the original structure of the pattern + # if the original pattern has a single argument, we will have + # (original_op, (original_arg, ...)) + # otherwise, we'll have a list of arguments + # (original_op, arg0, arg1, arg2, ...) + if is_single_arg: + matched_node_pattern.append(tuple(current_node_pattern)) + else: + matched_node_pattern.extend(list(current_node_pattern)) + else: + matched_node_pattern.append(current_node_pattern[0]) + else: + matched_node_pattern.append(node) + + for node in reversed(graph.nodes): + if node.name not in match_map and node.name not in all_matched: + for pattern, quantize_handler_cls in patterns.items(): + root_node_getter = root_node_getter_mapping.get(pattern, None) + if _is_match(modules, node, pattern) and node.name not in match_map: + matched_node_pattern: List[Node] = [] + record_match( + pattern, + node, + node, + matched_node_pattern, + match_map) + quantize_handler = quantize_handler_cls( # type: ignore[operator] + matched_node_pattern, + modules, + root_node_getter) + last_node = node + # record the match for all nodes in the pattern + _recursive_record_node_in_match_map( + last_node, + match_map, + # we need to record all nodes in the matched pattern in the match_map + matched_node_pattern, + # this is a part of the value corresponding to the node + matched_node_pattern, + pattern, + quantize_handler) + break + + # add custom module instances to the match result + assert modules is not None + for node in graph.nodes: + if node.op == 'call_module' and \ + type(modules[node.target]) in custom_module_classes: + match_map[node.name] = ( + node, node, None, QuantizeHandler(node, modules, is_custom_module=True)) + + def is_standalone_module(node_target: str, modules: Dict[str, torch.nn.Module]): + assert modules is not None + return ( + node_target in standalone_module_names or # type: ignore[operator] + type(modules[node_target]) in standalone_module_classes # type: ignore[operator] + ) + + # add standalone modules to the match + for node in graph.nodes: + if node.op == 'call_module' and \ + 
(is_standalone_module(node.target, modules) or + _is_observed_standalone_module(modules[node.target])): + # add node to matched nodes + match_map[node.name] = ( + node, node, None, + QuantizeHandler(node, modules, is_standalone_module=True)) + + return match_map diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3665f75f756755109ac8721ab7861c55ff2d7669 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/pattern_utils.py @@ -0,0 +1,88 @@ +# mypy: allow-untyped-defs +from collections import OrderedDict +from typing import Dict, Any +from torch.ao.quantization.utils import Pattern +from ..fake_quantize import FixedQParamsFakeQuantize +from ..observer import ObserverBase +import copy + +__all__ = [ + "get_default_fusion_patterns", + "get_default_quant_patterns", + "get_default_output_activation_post_process_map", +] + +# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency) +QuantizeHandler = Any + +# pattern for conv bn fusion +_DEFAULT_FUSION_PATTERNS: Dict[Pattern, QuantizeHandler] = OrderedDict() +def _register_fusion_pattern(pattern): + def insert(fn): + _DEFAULT_FUSION_PATTERNS[pattern] = fn + return fn + return insert + +def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]: + return copy.copy(_DEFAULT_FUSION_PATTERNS) + +_DEFAULT_QUANTIZATION_PATTERNS: Dict[Pattern, QuantizeHandler] = OrderedDict() + +# Mapping from pattern to activation_post_process(observer/fake_quant) constructor for output activation +# e.g. pattern: torch.sigmoid, +# output_activation_post_process: default_fixed_qparams_range_0to1_fake_quant +_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP: Dict[Pattern, QuantizeHandler] = {} +_DEFAULT_OUTPUT_OBSERVER_MAP: Dict[Pattern, QuantizeHandler] = {} + +# Register pattern for both static quantization and qat +def _register_quant_pattern(pattern, fixed_qparams_observer=None): + def insert(fn): + _DEFAULT_QUANTIZATION_PATTERNS[pattern] = fn + if fixed_qparams_observer is not None: + _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP[pattern] = FixedQParamsFakeQuantize.with_args(observer=fixed_qparams_observer) + _DEFAULT_OUTPUT_OBSERVER_MAP[pattern] = fixed_qparams_observer + return fn + return insert + +# Get patterns for both static quantization and qat +def get_default_quant_patterns() -> Dict[Pattern, QuantizeHandler]: + return copy.copy(_DEFAULT_QUANTIZATION_PATTERNS) + +# a map from pattern to output activation post process constructor +# e.g. torch.sigmoid -> default_affine_fixed_qparam_fake_quant +def get_default_output_activation_post_process_map(is_training) -> Dict[Pattern, ObserverBase]: + if is_training: + return copy.copy(_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP) + else: + return copy.copy(_DEFAULT_OUTPUT_OBSERVER_MAP) + +# Example use of register pattern function: +# @_register_fusion_pattern(torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d))) +# class ConvOrLinearBNReLUFusion(): +# def __init__(...): +# ... +# + +def _sorted_patterns_dict(patterns_dict: Dict[Pattern, QuantizeHandler]) -> Dict[Pattern, QuantizeHandler]: + """ + Return a sorted version of the patterns dictionary such that longer patterns are matched first, + e.g. match (F.relu, F.linear) before F.relu. 
+ This works for current use cases, but we may need to have a more clever way to sort + things to address more complex patterns + """ + + def get_len(pattern): + """ this will calculate the length of the pattern by counting all the entries + in the pattern. + this will make sure (nn.ReLU, (nn.BatchNorm, nn.Conv2d)) comes before + (nn.BatchNorm, nn.Conv2d) so that we can match the former first + """ + len = 0 + if isinstance(pattern, tuple): + for item in pattern: + len += get_len(item) + else: + len += 1 + return len + + return OrderedDict(sorted(patterns_dict.items(), key=lambda kv: -get_len(kv[0]) if isinstance(kv[0], tuple) else 1)) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..378c51b6805dab01119072c4e8d67bd7b117a596 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/qconfig_mapping_utils.py @@ -0,0 +1,344 @@ +# mypy: allow-untyped-defs +import torch +import re +from collections import defaultdict, OrderedDict +from typing import Callable, Any, Dict, Tuple, Set, List, Union +from torch.ao.quantization import QConfig +from torch.ao.quantization.qconfig import _add_module_to_qconfig_obs_ctr, QConfigAny, qconfig_equals +from torch.ao.quantization.observer import ( + _is_activation_post_process, +) +from torch.ao.quantization.backend_config import ( + BackendConfig, + DTypeConfig, +) +from torch.ao.quantization.backend_config.utils import ( + get_module_to_qat_module, +) + +from torch.fx import ( + GraphModule, +) +from torch.fx.graph import ( + Graph, +) +from torch.ao.nn.intrinsic import _FusedModule + +from ..utils import ( + _parent_name, + get_qconfig_dtypes, +) +from ..qconfig_mapping import ( + _OBJECT_TYPE_DICT_KEY, + _MODULE_NAME_DICT_KEY, + _MODULE_NAME_REGEX_DICT_KEY, + QConfigMapping, +) + +__all__: List[str] = [] + + + +def _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping: QConfigMapping, + cur_module_path: str, + cur_object_type: Callable, + cur_object_type_idx: int, + fallback_qconfig: QConfigAny, +) -> QConfigAny: + for (module_name, object_type, index), qconfig in qconfig_mapping.module_name_object_type_order_qconfigs.items(): + if ( + (module_name == cur_module_path) and + (object_type == cur_object_type) and + (index == cur_object_type_idx) + ): + return qconfig + return fallback_qconfig + + +def _update_qconfig_for_fusion(model: GraphModule, qconfig_mapping: QConfigMapping): + """ + Update the QConfigMapping to account for fused modules such as LinearReLU. + This assumes the QConfigMapping's attributes have already been converted to OrderedDicts. 
+ """ + object_type_dict = qconfig_mapping.object_type_qconfigs + if len(object_type_dict) == 0: + return qconfig_mapping + + modules = dict(model.named_modules()) + + for node in model.graph.nodes: + if node.op == 'call_module' and node.target in modules: + maybe_fused_module = modules[str(node.target)] + if not isinstance(maybe_fused_module, _FusedModule): + continue + + ops = list(maybe_fused_module._modules.values()) + fused_qconfig = object_type_dict.get(type(ops[0]), None) + + # Raise an error if the modules in the fused module have + # different qconfigs specified in the qconfig_dict + # TODO: currently it only works for modules, + # need to make this work for torch.nn.functional.relu + # TODO: currently it only works for object_type configurations, + # ideally it should work for different types of configurations, + # maybe we want to redesign this part + for op in ops[1:]: + if not qconfig_equals(object_type_dict.get(type(op), None), fused_qconfig): + raise LookupError( + "During fusion, we need to specify the same " + + f"qconfigs for all module types in {type(maybe_fused_module)} " + + f"offending type: {type(op)}") + + if fused_qconfig is not None: + object_type_dict[type(maybe_fused_module)] = fused_qconfig + +def _generate_node_name_to_qconfig( + root: torch.nn.Module, + modules: Dict[str, torch.nn.Module], + input_graph: Graph, + qconfig_mapping: QConfigMapping, + node_name_to_scope: Dict[str, Tuple[str, type]]) -> Dict[str, QConfigAny]: + global_qconfig = qconfig_mapping.global_qconfig + node_name_to_qconfig = {} + + # example: + # + # {'foo.bar': {F.linear: 0, F.conv2d: 1, ...}, ...} + # + # meaning in submodule 'foo.bar', we have seen 0 F.linear and + # 1 F.conv2d invocations so far. + submodule_to_object_type_to_cur_idx: Dict[str, Dict[Callable, int]] = \ + defaultdict(lambda: defaultdict(int)) + for node in input_graph.nodes: + qconfig = None + if node.op == "get_attr": + module_name, _ = _parent_name(node.target) + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, type(modules[module_name]), module_name, global_qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + elif node.op == "call_function": + # precedence: module_name_qconfig + # > function_qconfig > global_qconfig + # module_name takes precedence over function qconfig + function_qconfig = _get_object_type_qconfig( + qconfig_mapping, node.target, global_qconfig) + module_path, module_type = node_name_to_scope[node.name] + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, module_type, module_path, function_qconfig) + + cur_object_type_idx = \ + submodule_to_object_type_to_cur_idx[module_path][node.target] + submodule_to_object_type_to_cur_idx[module_path][node.target] += 1 + qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping, module_path, node.target, cur_object_type_idx, qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + elif node.op == "call_method": + module_path, module_type = node_name_to_scope[node.name] + # first use node.target (string) to get the qconfig + # this is to support configs like + # "object_type": [("reshape", qconfig)] + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, node.target, module_path, global_qconfig) + # if there is no special config for the method, we'll fall back to the + # config for the module that contains the call_method node + qconfig = 
_maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, module_type, module_path, qconfig) + # currently call_method does not support modifying qconfig + # by order, we can add this later if it is needed. + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + elif node.op == 'call_module': + # if the node is an observer, just continue - don't add it to the qconfig_map + if _is_activation_post_process(modules[node.target]): + continue + qconfig = _maybe_adjust_qconfig_for_module_type_or_name( + qconfig_mapping, type(modules[node.target]), node.target, global_qconfig) + + module_path, module_type = node_name_to_scope[node.name] + # Note: for call_module, the module_path is the current module's name. + # to meaningfully count invocations, we need to count them in the parent + # module. + parent_name, _ = _parent_name(module_path) + cur_object_type_idx = \ + submodule_to_object_type_to_cur_idx[parent_name][module_type] + submodule_to_object_type_to_cur_idx[parent_name][module_type] += 1 + qconfig = _maybe_adjust_qconfig_for_module_name_object_type_order( + qconfig_mapping, parent_name, module_type, cur_object_type_idx, + qconfig) + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None)) + + # regex is not supported eager mode propagate_qconfig_, we'll + # need to set the qconfig explicitly here in case regex + # is used + modules[node.target].qconfig = qconfig_with_device_check + else: + qconfig_with_device_check = None + + node_name_to_qconfig[node.name] = qconfig_with_device_check + return node_name_to_qconfig + + +def _check_is_valid_config_dict(config_dict: Any, allowed_keys: Set[str], dict_name: str) -> None: + r""" Checks if the given config_dict has the correct keys + + Args: + `config_dict`: dictionary whose keys we want to check + """ + + for k in config_dict.keys(): + if k not in allowed_keys: + raise ValueError( + 'Expected ' + dict_name + ' to have the following keys: ' + + str(allowed_keys) + '. 
But found \'' + k + + '\' instead.') + + +def _compare_prepare_convert_qconfig_mappings( + prepare_qconfig_mapping: QConfigMapping, + convert_qconfig_mapping: QConfigMapping): + r""" Compare the qconfig_mapping passed in convert to the one from prepare and check the values + + Args: + `prepare_qconfig_mapping`: configuration for prepare quantization step + `convert_qconfig_mapping`: configuration for convert quantization step + """ + assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), \ + "Expected global qconfigs to be the same in the prepare and convert quantization configs" + prepare_dicts: List[OrderedDict] = [ + prepare_qconfig_mapping.object_type_qconfigs, + prepare_qconfig_mapping.module_name_qconfigs, + prepare_qconfig_mapping.module_name_regex_qconfigs, + ] + convert_dicts: List[OrderedDict] = [ + convert_qconfig_mapping.object_type_qconfigs, + convert_qconfig_mapping.module_name_qconfigs, + convert_qconfig_mapping.module_name_regex_qconfigs, + ] + dict_names = [_OBJECT_TYPE_DICT_KEY, _MODULE_NAME_DICT_KEY, _MODULE_NAME_REGEX_DICT_KEY] + for i in range(len(prepare_dicts)): + for name in prepare_dicts[i].keys(): + assert name in convert_dicts[i], f"Missing key {dict_names[i]} {name} in convert QConfigMapping \ + when it was present in prepare" + assert convert_dicts[i][name] is None \ + or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), \ + f"Expected convert QConfigMapping to have the same qconfig as prepare for key {dict_names[i]} {name}; \ + prepare: {prepare_dicts[i][name]}; convert: {convert_dicts[i][name]}" + +def _is_qconfig_supported_by_dtype_configs(qconfig: QConfig, dtype_configs: List[DTypeConfig]): + for dtype_config in dtype_configs: + is_dynamic = dtype_config.is_dynamic + if is_dynamic is None: + is_dynamic = False + input_dtype = dtype_config.input_dtype or torch.float + weight_dtype = dtype_config.weight_dtype or torch.float + bias_dtype = dtype_config.bias_dtype or torch.float + output_dtype = dtype_config.output_dtype or torch.float + qconfig_activation_dtype, qconfig_weight_dtype, qconfig_input_act_is_dynamic = \ + get_qconfig_dtypes(qconfig) + qconfig_bias_dtype = torch.float16 \ + if ( + qconfig_activation_dtype == torch.float16 + and qconfig_weight_dtype == torch.float16 + and not is_dynamic + ) else torch.float + + if is_dynamic: + is_match = qconfig_input_act_is_dynamic and \ + input_dtype == qconfig_activation_dtype and \ + output_dtype == torch.float and \ + weight_dtype == qconfig_weight_dtype + else: + is_match = input_dtype == qconfig_activation_dtype and \ + output_dtype == qconfig_activation_dtype and \ + weight_dtype == qconfig_weight_dtype and \ + bias_dtype == qconfig_bias_dtype + if is_match: + return True + return False + +def _get_object_type_qconfig( + qconfig_mapping: QConfigMapping, + object_type: Union[Callable, str], + fallback_qconfig: QConfigAny) -> QConfigAny: + return qconfig_mapping.object_type_qconfigs.get(object_type, fallback_qconfig) + + +def _get_module_name_regex_qconfig(qconfig_mapping, module_name, fallback_qconfig): + for regex_pattern, qconfig in qconfig_mapping.module_name_regex_qconfigs.items(): + if re.match(regex_pattern, module_name): + # first match wins + return qconfig + return fallback_qconfig + + +def _get_module_name_qconfig(qconfig_mapping, module_name, fallback_qconfig): + if module_name == '': + # module name qconfig not found + return fallback_qconfig + if module_name in qconfig_mapping.module_name_qconfigs: + return 
qconfig_mapping.module_name_qconfigs[module_name] + else: + parent, _ = _parent_name(module_name) + return _get_module_name_qconfig(qconfig_mapping, parent, fallback_qconfig) + + +def _maybe_adjust_qconfig_for_module_type_or_name(qconfig_mapping, module_type, module_name, global_qconfig): + # get qconfig for module_name, + # fallback to module_name_regex_qconfig, module_type_qconfig, + # global_qconfig if necessary + module_type_qconfig = _get_object_type_qconfig( + qconfig_mapping, module_type, global_qconfig) + module_name_regex_qconfig = _get_module_name_regex_qconfig( + qconfig_mapping, module_name, module_type_qconfig) + module_name_qconfig = _get_module_name_qconfig( + qconfig_mapping, module_name, module_name_regex_qconfig) + return module_name_qconfig + + +def _get_flattened_qconfig_dict(qconfig_mapping: QConfigMapping) -> Dict[Union[Callable, str], QConfigAny]: + """ flatten the global, object_type and module_name qconfig + to the same qconfig_dict so that it can be used by + propagate_qconfig_ function. + "module_name_regex" is ignored for now since it's not supported + in propagate_qconfig_, but it can be fixed later. + + For example: + Input: { + "": qconfig, + "object_type": [ + (torch.add, qconfig) + ], + "module_name": [ + ("conv", qconfig) + ] + } + + Output: { + "": qconfig, + torch.add: qconfig, + "conv": qconfig + } + """ + flattened: Dict[Union[Callable, str], QConfigAny] = {"": qconfig_mapping.global_qconfig} + for obj, qconfig in qconfig_mapping.object_type_qconfigs.items(): + flattened[obj] = qconfig + for obj, qconfig in qconfig_mapping.module_name_qconfigs.items(): + flattened[obj] = qconfig + return flattened + + +def _update_qconfig_for_qat( + qconfig_mapping: QConfigMapping, + backend_config: BackendConfig): + """ + Update the qconfig_mapping to account for module swaps during QAT. + During QAT we perform a module swap on the nn.Module types to the corresponding nn.qat.modules types. 
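+ For example, if the mapping sets a qconfig for `torch.nn.Linear` and the backend
+ maps `torch.nn.Linear` to a QAT module class (typically `torch.ao.nn.qat.Linear`),
+ the same qconfig is registered under that QAT class as well, so the entry still
+ matches after the swap.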
+ """ + module_to_qat_module_class = get_module_to_qat_module(backend_config) + object_type_dict = qconfig_mapping.object_type_qconfigs + new_object_type_dict = object_type_dict.copy() + for k, v in new_object_type_dict.items(): + if k in module_to_qat_module_class: + object_type_dict[module_to_qat_module_class[k]] = v diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5029db47961f0340e4fbabdd9cef59bf92ecf7e7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/fx/utils.py @@ -0,0 +1,874 @@ +# mypy: allow-untyped-defs +import copy +import torch +import torch.nn as nn +from torch.ao.quantization import ( + QConfigAny, + QuantType, +) +from torch.ao.quantization.backend_config import ( + DTypeWithConstraints, +) +from torch.ao.quantization.fake_quantize import ( + FakeQuantizeBase, + FixedQParamsFakeQuantize, +) +from torch.ao.quantization.observer import ( + FixedQParamsObserver, + ObserverBase, +) +from torch.ao.quantization.qconfig import ( + float16_static_qconfig, + float16_dynamic_qconfig, + qconfig_equals, +) +from torch.ao.quantization.stubs import DeQuantStub +from torch.ao.quantization.utils import ( + _assert_and_get_unique_device, + activation_is_statically_quantized, +) +from torch.ao.quantization.observer import _is_activation_post_process +from torch.ao.quantization.qconfig_mapping import QConfigMapping + +from torch.fx import GraphModule, map_arg + +from torch.fx.graph import ( + Graph, + Node, +) +from .custom_config import PrepareCustomConfig +# importing the lib so that the quantized_decomposed ops are registered +from ._decomposed import quantized_decomposed_lib # noqa: F401 + +from typing import Callable, Optional, List, Dict, Any, Set, Tuple, Union, Type +from dataclasses import dataclass +from collections import namedtuple +import operator +import warnings + +# TODO: revisit this list. 
Many helper methods shouldn't be public +__all__ = [ + "all_node_args_except_first", + "all_node_args_have_no_tensors", + "assert_and_get_unique_device", + "collect_producer_nodes", + "create_getattr_from_value", + "create_node_from_old_node_preserve_meta", + "EMPTY_ARG_DICT", + "get_custom_module_class_keys", + "get_linear_prepack_op_for_dtype", + "get_new_attr_name_with_prefix", + "get_non_observable_arg_indexes_and_types", + "get_qconv_prepack_op", + "get_skipped_module_name_and_classes", + "graph_module_from_producer_nodes", + "maybe_get_next_module", + "NodeInfo", + "node_arg_is_bias", + "node_arg_is_weight", + "NON_OBSERVABLE_ARG_DICT", + "NON_QUANTIZABLE_WEIGHT_OPS", + "return_arg_list", + "ObservedGraphModuleAttrs", +] + +NON_QUANTIZABLE_WEIGHT_OPS = {torch.nn.functional.layer_norm, torch.nn.functional.group_norm, torch.nn.functional.instance_norm} + +@dataclass +class ObservedGraphModuleAttrs: + node_name_to_qconfig: Dict[str, QConfigAny] + node_name_to_scope: Dict[str, Tuple[str, type]] + prepare_custom_config: PrepareCustomConfig + equalization_node_name_to_qconfig: Dict[str, Any] + qconfig_mapping: QConfigMapping + is_qat: bool + observed_node_names: Set[str] + is_observed_standalone_module: bool = False + standalone_module_input_quantized_idxs: Optional[List[int]] = None + standalone_module_output_quantized_idxs: Optional[List[int]] = None + +def node_arg_is_weight(node: Node, arg: Any) -> bool: + """Returns if node arg is weight""" + weight_index = None + if "target_dtype_info" in node.meta: + weight_index = node.meta["target_dtype_info"].get("weight_index", None) + if weight_index is not None and weight_index < len(node.args) and node.args[weight_index] is arg: + return True + return node.kwargs.get("weight") is arg + +def node_arg_is_bias(node: Node, arg: Any) -> bool: + """Returns if node arg is bias""" + bias_index = None + if "target_dtype_info" in node.meta: + bias_index = node.meta["target_dtype_info"].get("bias_index", None) + if bias_index is not None and bias_index < len(node.args) and node.args[bias_index] is arg: + return True + return node.kwargs.get("bias") is arg + +def get_custom_module_class_keys(custom_module_mapping: Dict[QuantType, Dict[Type, Type]]) -> List[Any]: + r""" Get all the unique custom module keys in the custom config dict + e.g. 
+ Input: + { + QuantType.STATIC: { + CustomModule1: ObservedCustomModule + }, + QuantType.DYNAMIC: { + CustomModule2: DynamicObservedCustomModule + }, + QuantType.WEIGHT_ONLY: { + CustomModule3: WeightOnlyObservedCustomModule + }, + } + + Output: + # extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts + [CustomModule1, CustomModule2, CustomModule3] + """ + # using set to dedup + float_custom_module_classes : Set[Any] = set() + for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]: + quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {}) + quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys()) + float_custom_module_classes |= quant_mode_custom_module_classes + return list(float_custom_module_classes) + +def get_linear_prepack_op_for_dtype(dtype): + if dtype == torch.float16: + return torch.ops.quantized.linear_prepack_fp16 + elif dtype == torch.qint8: + return torch.ops.quantized.linear_prepack + else: + raise Exception("can't get linear prepack op for dtype:", dtype) # noqa: TRY002 + +def get_qconv_prepack_op(conv_op: Callable) -> Callable: + prepack_ops = { + torch.nn.functional.conv1d: torch.ops.quantized.conv1d_prepack, + torch.nn.functional.conv2d: torch.ops.quantized.conv2d_prepack, + torch.nn.functional.conv3d: torch.ops.quantized.conv3d_prepack, + torch.nn.functional.conv_transpose1d: torch.ops.quantized.conv_transpose1d_prepack, + torch.nn.functional.conv_transpose2d: torch.ops.quantized.conv_transpose2d_prepack, + torch.nn.functional.conv_transpose3d: torch.ops.quantized.conv_transpose3d_prepack, + } + prepack_op = prepack_ops.get(conv_op, None) + assert prepack_op, f"Didn't find prepack op for {conv_op}" + return prepack_op + +# Returns a function that can get a new attribute name for module with given +# prefix, for example, +# >> get_new_observer_name = get_new_attr_name_with_prefix('_observer') +# >> new_name = get_new_observer_name(module) +# new_name will be an unused attribute name on module, e.g. `_observer_1` +def get_new_attr_name_with_prefix(prefix: str) -> Callable: + prefix = prefix.replace(".", "_") + + def get_new_attr_name(module: torch.nn.Module): + def get_attr_name(i: int): + return prefix + str(i) + i = 0 + attr_name = get_attr_name(i) + while hasattr(module, attr_name): + i += 1 + attr_name = get_attr_name(i) + return attr_name + return get_new_attr_name + +def collect_producer_nodes(node: Node) -> Optional[List[Node]]: + r''' Starting from a target node, trace back until we hit inpu or + getattr node. This is used to extract the chain of operators + starting from getattr to the target node, for example + def forward(self, x): + observed = self.observer(self.weight) + return F.linear(x, observed) + collect_producer_nodes(observed) will either return a list of nodes that + produces the observed node or None if we can't extract a self contained + graph without free variables(inputs of the forward function). 
+ ''' + nodes = [node] + frontier = [node] + while frontier: + node = frontier.pop() + all_args = list(node.args) + list(node.kwargs.values()) + for arg in all_args: + if not isinstance(arg, Node): + continue + if arg.op == 'placeholder': + # hit input, can't fold in this case + return None + nodes.append(arg) + if not (arg.op == 'call_function' and arg.target == getattr): + frontier.append(arg) + return nodes + +def graph_module_from_producer_nodes( + root: GraphModule, producer_nodes: List[Node]) -> GraphModule: + r''' Construct a graph module from extracted producer nodes + from `collect_producer_nodes` function + Args: + root: the root module for the original graph + producer_nodes: a list of nodes we use to construct the graph + Return: + A graph module constructed from the producer nodes + ''' + assert len(producer_nodes) > 0, 'list of producer nodes can not be empty' + # since we traced back from node to getattr + producer_nodes.reverse() + graph = Graph() + env: Dict[Any, Any] = {} + + def load_arg(a): + return map_arg(a, lambda node: env[node]) + for producer_node in producer_nodes: + env[producer_node] = graph.node_copy(producer_node, load_arg) + graph.output(load_arg(producer_nodes[-1])) + graph_module = GraphModule(root, graph) + return graph_module + +# TODO: delete +def assert_and_get_unique_device(module: torch.nn.Module) -> Any: + """ + Returns the unique device for a module, or None if no device is found. + Throws an error if multiple devices are detected. + """ + return _assert_and_get_unique_device(module) + +def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node: + """ + Given a value of any type, creates a getattr node corresponding to the value and + registers the value as a buffer to the module. + """ + get_new_attr_name = get_new_attr_name_with_prefix(prefix) + attr_name = get_new_attr_name(module) + device = assert_and_get_unique_device(module) + new_value = value.clone().detach() if isinstance(value, torch.Tensor) \ + else torch.tensor(value, device=device) + module.register_buffer(attr_name, new_value) + # Create get_attr with value + attr_node = graph.create_node("get_attr", attr_name) + return attr_node + +def all_node_args_have_no_tensors(node: Node, modules: Dict[str, torch.nn.Module], cache: Dict[Node, bool]) -> bool: + """ + If we know for sure that all of this node's args have no + tensors (are primitives), return True. If we either + find a tensor or are not sure, return False. Note: this + function is not exact. 
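+ For example, a node produced by `x0.size(0)` or `x0.ndim` is treated as tensor-free,
+ while `call_module` and `get_attr` nodes are conservatively assumed to involve tensors.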
+ """ + if cache and node in cache: + return cache[node] + + result = False # will be overwritten + if not isinstance(node, Node): + result = True + elif node.op == 'placeholder': + result = False + elif node.op == 'call_module': + assert isinstance(node.target, str) + if _is_activation_post_process(modules[node.target]): + result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type] + elif node.op == 'call_module': + result = False + elif node.op == 'call_function' and node.target is operator.getitem: + result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type] + elif node.op == 'get_attr': + result = False + elif node.target is getattr and node.args[1] in ['ndim', 'shape']: + # x1 = x0.ndim + result = True + elif node.op == 'call_method' and node.target == 'size': + # x1 = x0.size(0) + result = True + else: + found_one_tensor = False + for arg in node.args: + if isinstance(arg, list): + for list_el in arg: + if isinstance(list_el, Node): + this_list_el_args_have_no_tensors = \ + all_node_args_have_no_tensors(list_el, modules, cache) + found_one_tensor = found_one_tensor or \ + (not this_list_el_args_have_no_tensors) + # If found_one_tensor is True, there is no point in + # recursing further as the end result will always + # be True. + # TODO(future PR): remove this entire function and + # change to dtype inference without recursion. + if found_one_tensor: + result = not found_one_tensor + if cache: + cache[node] = result + return result + elif isinstance(arg, int): + pass + else: + if isinstance(arg, Node): + this_arg_args_have_no_tensors = all_node_args_have_no_tensors(arg, modules, cache) + found_one_tensor = found_one_tensor or \ + (not this_arg_args_have_no_tensors) + # If found_one_tensor is True, there is no point in + # recursing further as the end result will always + # be True. + # TODO(future PR): remove this entire function and + # change to dtype inference without recursion. 
+ if found_one_tensor: + result = not found_one_tensor + if cache: + cache[node] = result + return result + else: + found_one_tensor = True + result = not found_one_tensor + if cache: + cache[node] = result + return result + +def all_node_args_except_first(node: Node) -> List[int]: + """ + Returns all node arg indices after first + """ + return list(range(1, len(node.args))) + +def return_arg_list(arg_indices: List[int]) -> Callable[[Node], List[int]]: + """ + Constructs a function that takes a node as arg and returns the arg_indices + that are valid for node.args + """ + def arg_indices_func(node: Node) -> List[int]: + return [i for i in arg_indices if i < len(node.args)] + return arg_indices_func + +NodeInfo = namedtuple("NodeInfo", "op target") + +# this dict identifies which indices of a node are non tensors +# so that they can be propagated correctly since inserting observers +# for them would cause errors + +NON_OBSERVABLE_ARG_DICT: Dict[NodeInfo, Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]] = { + NodeInfo("call_method", "masked_fill") : { + torch.bool: return_arg_list([1]), + float: return_arg_list([2]) + }, + NodeInfo("call_method", "permute") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "repeat") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "reshape") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "size") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "transpose") : { + int: all_node_args_except_first + }, + NodeInfo("call_method", torch.transpose) : { + int: all_node_args_except_first + }, + NodeInfo("call_method", "unsqueeze") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "unsqueeze_") : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", torch.unsqueeze) : { + int: return_arg_list([1]) + }, + NodeInfo("call_method", "view") : { + int: all_node_args_except_first + }, +} + +EMPTY_ARG_DICT: Dict[Union[type, torch.dtype], Callable[[Node], List[int]]] = {} + +def get_non_observable_arg_indexes_and_types(node: Node) -> Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]: + """ + Returns a dict with of non float tensor types as keys and values which correspond to a + function to retrieve the list (which takes the node as an argument) + """ + info = NodeInfo(node.op, node.target) + + return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT) + +def maybe_get_next_module( + node: Node, + modules: Dict[str, nn.Module], + target_module_type: Optional[Type[nn.Module]] = None, + target_functional_type: Any = None, +) -> Optional[Node]: + """ Gets the next module that matches what is needed in + is_target_module_type if it exists + + Args: + node: The node whose users we want to look at + target_module_type: Module type that we want to check + target_functional_type: Functional type that we want to check + """ + + for user in node.users.keys(): + if user.op == 'call_module' and target_module_type is not None and \ + isinstance(modules[str(user.target)], target_module_type): + return user + elif (user.op == 'call_function' and target_functional_type is not None and + user.target == target_functional_type): + return user + + return None + +def create_node_from_old_node_preserve_meta( + quantized_graph: Graph, + create_node_args: Tuple[Any, ...], + old_node: Node, +) -> Node: + """ + Creates `new_node` and copies the necessary metadata to it from `old_node`. 
+ """ + new_node = quantized_graph.create_node(*create_node_args) + new_node.stack_trace = old_node.stack_trace + return new_node + +def get_skipped_module_name_and_classes( + prepare_custom_config: PrepareCustomConfig, + is_standalone_module: bool) -> Tuple[List[str], List[Type[Any]]]: + skipped_module_names = copy.copy(prepare_custom_config.non_traceable_module_names) + skipped_module_classes = copy.copy(prepare_custom_config.non_traceable_module_classes) + if not is_standalone_module: + # standalone module and custom module config are applied in top level module + skipped_module_names += list(prepare_custom_config.standalone_module_names.keys()) + skipped_module_classes += list(prepare_custom_config.standalone_module_classes.keys()) + skipped_module_classes += get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping) + + return skipped_module_names, skipped_module_classes + +def _is_custom_module_lstm( + node: Node, + named_modules: Dict[str, torch.nn.Module], + qconfig: QConfigAny = None, + # QuantizeHandler, but we cannot include the type here due to circular imports + qhandler: Optional[Any] = None, +) -> bool: + """ + Return whether this refers to the custom module LSTM flow. + """ + mod = _get_module(node, named_modules) + if qconfig is not None and qhandler is not None: + assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler) # type: ignore[attr-defined] + return isinstance(mod, torch.nn.LSTM) and \ + activation_is_statically_quantized(qconfig) and \ + qhandler.is_custom_module() + else: + return isinstance(mod, torch.ao.nn.quantizable.LSTM) + +def _is_custom_module_mha( + node: Node, + named_modules: Dict[str, torch.nn.Module], + qconfig: QConfigAny = None, + # QuantizeHandler, but we cannot include the type here due to circular imports + qhandler: Optional[Any] = None, +) -> bool: + """ + Return whether this refers to the custom module MultiheadAttention flow. + """ + mod = _get_module(node, named_modules) + if qconfig is not None and qhandler is not None: + assert isinstance(qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler) # type: ignore[attr-defined] + return isinstance(mod, torch.nn.MultiheadAttention) and \ + activation_is_statically_quantized(qconfig) and \ + qhandler.is_custom_module() + else: + return isinstance(mod, torch.ao.nn.quantizable.MultiheadAttention) + +def _get_module(node: Node, named_modules: Dict[str, torch.nn.Module]) -> Optional[torch.nn.Module]: + """ + If `node` refers to a call_module node, return the module, else None. + """ + if node.op == "call_module" and str(node.target) in named_modules: + return named_modules[str(node.target)] + else: + return None + +def _insert_dequant_stub( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> Node: + """ + Attach a `DeQuantStub` to the model and create a node that calls this + `DeQuantStub` on the output of `node`, similar to how observers are inserted. 
+ """ + prefix = "dequant_stub_" + get_new_dequant_stub_name = get_new_attr_name_with_prefix(prefix) + dequant_stub_name = get_new_dequant_stub_name(model) + dequant_stub = DeQuantStub() + setattr(model, dequant_stub_name, dequant_stub) + named_modules[dequant_stub_name] = dequant_stub + with graph.inserting_after(node): + return graph.call_module(dequant_stub_name, (node,)) + +def _insert_dequant_stubs_for_custom_module_lstm_output( + node: Node, + model: torch.nn.Module, + named_modules: Dict[str, torch.nn.Module], + graph: Graph, +) -> Node: + """ + Insert DeQuantStubs after each internal output node of custom module LSTM. + + Custom module LSTM outputs are nested tuples of the structure (output, (hidden0, hidden1)), + Since we cannot dequantize a tuple as a whole, we must first break down the tuple into its + components through `getitem`. This function transforms the graph as follows: + + (1) Split the LSTM node into (output, (hidden0, hidden1)) + (2) Insert a DeQuantStub after each internal node + (3) Recombine the DeQuantStubs into the same structure as before + (4) Reroute all consumers of the original LSTM node and its sub-nodes + (e.g. lstm[0]) + + Before: + lstm_output + | + v + original_user(s) + After: + lstm_output + / \\ + / (getitem) \\ + / \\ + v v + output hidden + | / \\ + (DeQuantStub) (getitem) + | / \\ + v v v + output_dq hidden0 hidden1 + | | | + | (DeQuantStub) (DeQuantStub) + | | | + | v v + | hidden0_dq hidden1_dq + | \\ / + | (tuple) + | \\ / + | v v + | hidden_dq + \\ / + \\ (tuple) / + v v + lstm_output_dq + | + v + original_user(s) + + For step (4), reroute all users of the original LSTM node(s) as follows: + lstm_output -> lstm_output_dq + lstm_output[0] -> output_dq + lstm_output[1] -> hidden_dq + lstm_output[1][0] -> hidden0_dq + lstm_output[1][1] -> hidden1_dq + + Return the node `lstm_output_dq`. + """ + # (1) Split the LSTM node into (output, (hidden0, hidden1)) + # (2) Insert a DeQuantStub after each internal node + with graph.inserting_after(node): + output = graph.call_function(operator.getitem, (node, 0)) + output_dq = _insert_dequant_stub(output, model, named_modules, graph) + with graph.inserting_after(output_dq): + hidden = graph.call_function(operator.getitem, (node, 1)) + with graph.inserting_after(hidden): + hidden0 = graph.call_function(operator.getitem, (hidden, 0)) + hidden0_dq = _insert_dequant_stub(hidden0, model, named_modules, graph) + with graph.inserting_after(hidden0_dq): + hidden1 = graph.call_function(operator.getitem, (hidden, 1)) + hidden1_dq = _insert_dequant_stub(hidden1, model, named_modules, graph) + + # (3) Recombine the DeQuantStubs into the same structure as before + with graph.inserting_after(hidden1_dq): + hidden_dq = graph.call_function(tuple, ([hidden0_dq, hidden1_dq],)) + with graph.inserting_after(hidden_dq): + lstm_output_dq = graph.call_function(tuple, ([output_dq, hidden_dq],)) + + # (4) Reroute all consumers of the original LSTM node and its sub-nodes + for user in list(node.users.keys()): + if user != output and user != hidden: + user.replace_input_with(node, lstm_output_dq) + # The getitem and tuple nodes we added here may interfere with reference quantized + # pattern matching, so we need to redirect the consumers of internal nodes to the + # corresponding nodes with DeQuantStubs (e.g. lstm_output_dq[0] -> output_dq) attached, + # in order to preserve reference patterns like "dequantize - consumer - quantize". 
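+ # For example, per the mapping in the docstring above, a consumer that previously read
+ # lstm_output[1][0] ends up reading hidden0_dq after rerouting, so reference patterns
+ # such as "dequantize - consumer - quantize" remain matchable.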
+ _reroute_tuple_getitem_pattern(graph) + return lstm_output_dq + +def _maybe_get_custom_module_lstm_from_node_arg( + arg: Node, + named_modules: Dict[str, torch.nn.Module], +) -> Optional[Node]: + """ + Given an argument of a node, if the argument refers to the path through which the node + is a consumer of custom module LSTM, return the custom module LSTM node, or None otherwise. + + This is used to determine whether a node is a consumer of custom module LSTM, and, if so, + skip inserting input observers for this node. This is because custom module LSTM produces + quantized outputs, so inserting an input observer for the consumer of custom module LSTM + would unnecessarily quantize the outputs again. + + lstm -> consumer + + In practice, however, custom module LSTM outputs a tuple (output, (hidden0, hidden1)) with + DeQuantStubs attached to each internal node (see `_insert_dequant_stubs_for_custom_module_lstm_output`). + This tuple can be consumed in one of four ways: + + lstm -> getitem -> DeQuantStub -> consumer # consume lstm[0] + lstm -> getitem -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm[1] + lstm -> getitem -> getitem -> DeQuantStub -> consumer # consume lstm[1][0] or lstm[1][1] + lstm -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm + + Thus, we must match against the above patterns instead of simply checking the parent node + to determine whether this node is a consumer of a custom module LSTM. + """ + def match_dq(a): + return isinstance(_get_module(a, named_modules), DeQuantStub) + + def match_lstm(a): + return _is_custom_module_lstm(a, named_modules) + + def match_getitem(a): + return a.op == "call_function" and a.target == operator.getitem + + def match_tuple(a): + return a.op == "call_function" and a.target == tuple + + def _match_pattern(match_pattern: List[Callable]) -> Optional[Node]: + """ + Traverse up the graph and match the args one by one. + If there is a match, return the last matched node, or None otherwise. + """ + a = arg + for i, match in enumerate(match_pattern): + if not match(a): + return None + # Match next arg, for tuple the arg is a tuple of a list, e.g. ([dq_1, other_node],) + if i < len(match_pattern) - 1: + if match == match_tuple: + a = a.args[0][0] # type: ignore[assignment,index] + else: + a = a.args[0] # type: ignore[assignment] + return a + + all_match_patterns = [ + [match_dq, match_getitem, match_lstm], + [match_tuple, match_dq, match_getitem, match_getitem, match_lstm], + [match_dq, match_getitem, match_getitem, match_lstm], + [match_tuple, match_dq, match_getitem, match_lstm], + ] + + for p in all_match_patterns: + matched_node = _match_pattern(p) + if matched_node is not None: + return matched_node + return None + +def _reroute_tuple_getitem_pattern(graph: Graph): + """ + Search for patterns where N consecutive `tuple` call_function nodes are followed by + N consecutive `getitem` call_function nodes that are "reverses" of the `tuple` nodes. + If we find this pattern, reroute the consumers of the last `getitem` to skip these + N `tuple` and `getitem` nodes. + + Before: + + a b c + | \\ / + \\ tuple + \\ / + tuple + | + getitem(1) + | + getitem(0) + | + d + + After: + + b + | + d + """ + def find_patterns( + node: Node, + index_stack: List[int], + current_pattern: List[Node], + matched_patterns: List[List[Node]], + seen: Set[Tuple[Node, Tuple[int, ...]]]): + """ + Traverse the graph recursively to match for the N-tuple - N-getitem patterns, + starting at the given node. 
+ + We use a stack to keep track of the expected `getitem` indices, since these are + reversed from the `tuple` indices. In the above example, the stack after + (b -> tuple -> tuple) will be [0, 1], which will be popped by getitem(1) first + and then by getitem(0). + + TODO: traverse upwards from the output and handle the case when tuple is not a + separate node, e.g. graph.call_function(operator.getitem, args=(a, (b, c))) + """ + if len(index_stack) == 0 and len(current_pattern) > 0: + matched_patterns.append(copy.copy(current_pattern)) + current_pattern.clear() + + # Avoid duplicating work + state = (node, tuple(index_stack)) + if state in seen: + return + seen.add(state) + + # Iterate through users of this node to find tuple/getitem nodes to match + for user in node.users: + if user.op == "call_function" and user.target == tuple: + for i, user_arg in enumerate(user.args[0]): # type: ignore[arg-type] + if user_arg == node: + index_stack.append(i) + current_pattern.append(user) + find_patterns(user, index_stack, current_pattern, matched_patterns, seen) + elif user.op == "call_function" and user.target == operator.getitem: + if len(index_stack) > 0: + if user.args[1] == index_stack[-1]: + index_stack.pop() + current_pattern.append(user) + find_patterns(user, index_stack, current_pattern, matched_patterns, seen) + return matched_patterns + + # Collect all matched patterns + matched_patterns: List[List[Node]] = [] + seen: Set[Tuple[Node, Tuple[int, ...]]] = set() # (node, index_stack) + for node in graph.nodes: + find_patterns(node, [], [], matched_patterns, seen) + + # For each pattern, redirect all consumers of the last getitem node to the correct input + # of the first tuple node + for pattern in matched_patterns: + first_tuple = pattern[0] + last_getitem = pattern[-1] + assert first_tuple.op == "call_function" and first_tuple.target == tuple + assert last_getitem.op == "call_function" and last_getitem.target == operator.getitem + last_getitem_index = last_getitem.args[1] + new_input = first_tuple.args[0][last_getitem_index] # type: ignore[index] + for user in list(last_getitem.users.keys()): + user.replace_input_with(last_getitem, new_input) + +def _get_observer_from_activation_post_process( + activation_post_process: Union[ObserverBase, FakeQuantizeBase], +) -> ObserverBase: + """ + If `activation_post_process` is an observer, return the observer. + If `activation_post_process` is a fake quantize, return the internal observer. + """ + if isinstance(activation_post_process, ObserverBase): + return activation_post_process + else: + assert isinstance(activation_post_process, FakeQuantizeBase) + return activation_post_process.activation_post_process # type: ignore[return-value] + +def _qconfig_satisfies_dtype_config_constraints( + qconfig: QConfigAny, + dtype_with_constraints: DTypeWithConstraints, + is_activation: bool = True) -> bool: + """ + Return whether `qconfig` satisfies the following constraints from the backend, + specified through the activation and weight DTypeWithConstraints. + + 1. QConfig specified a quantization range that falls within the backend's, if any + 2. QConfig specified a min scale value that is >= the backend's, if any + 3. QConfig specified a FixedQParamsObserver or FixedQParamsFakeQuantize that has + scale and zero point that match the backend's, if any + + If `is_activation` is True, we check `qconfig.activation`, else we check `qconfig.weight`. + If `qconfig` or `dtype_with_constraints.dtype` is None, or the dtypes do not match, return True. 
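+ For example (illustrative numbers), a backend entry with quant_min_lower_bound=0 and
+ quant_max_upper_bound=255 rejects, with a warning, a qconfig whose observer was
+ constructed with quant_min=-128.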
+ """ + # TODO: log warnings only when the user enabled a debug flag + def _activation_post_process_satisfies_dtype_config_constraints( + activation_post_process: Union[ObserverBase, FakeQuantizeBase], + dtype_with_constraints: DTypeWithConstraints, + debug_string: str) -> bool: + observer = _get_observer_from_activation_post_process(activation_post_process) + app_quant_min = getattr(observer, "quant_min", None) + app_quant_max = getattr(observer, "quant_max", None) + # TODO: for now, just use the existing eps value as scale_min. In the future, we should + # resolve the differences between the two, either by renaming eps or some other way + app_scale_min = getattr(observer, "eps", None) + backend_quant_min = dtype_with_constraints.quant_min_lower_bound + backend_quant_max = dtype_with_constraints.quant_max_upper_bound + backend_scale_min = dtype_with_constraints.scale_min_lower_bound + backend_scale_exact_match = dtype_with_constraints.scale_exact_match + backend_zero_point_exact_match = dtype_with_constraints.zero_point_exact_match + # check quantization ranges + if backend_quant_min is not None and backend_quant_max is not None: + if app_quant_min is None or app_quant_max is None: + warnings.warn(f"QConfig {debug_string} must specify 'quant_min' and 'quant_max', ignoring {qconfig}") + return False + elif app_quant_min < backend_quant_min or app_quant_max > backend_quant_max: + warnings.warn( + f"QConfig {debug_string} quantization range must fall within the backend's:\n" + f"QConfig range = ({app_quant_min}, {app_quant_max}), " + f"BackendConfig range = ({backend_quant_min}, {backend_quant_max}), " + f"ignoring {qconfig}" + ) + return False + # check scale min + if backend_scale_min is not None: + if app_scale_min is None: + warnings.warn(f"QConfig {debug_string} must specify 'eps', ignoring {qconfig}") + return False + if app_scale_min < backend_scale_min: + warnings.warn( + f"QConfig {debug_string} eps ({app_scale_min}) must be greater than or equal to " + f"the backend's min scale value ({backend_scale_min}), ignoring {qconfig}" + ) + return False + # check fixed scale and zero point + if backend_scale_exact_match is not None and backend_zero_point_exact_match is not None: + # For tests only, accept the following qconfigs for now + # TODO: handle fp16 qconfigs properly + for accepted_qconfig in [float16_static_qconfig, float16_dynamic_qconfig]: + if qconfig_equals(qconfig, accepted_qconfig): + return True + suggestion_str = ( + "Please use torch.ao.quantization.get_default_qconfig_mapping or " + "torch.ao.quantization.get_default_qat_qconfig_mapping. 
Example:\n" + ' qconfig_mapping = get_default_qconfig_mapping("fbgemm")\n' + " model = prepare_fx(model, qconfig_mapping, example_inputs)" + ) + if not isinstance(activation_post_process, FixedQParamsObserver) and \ + not isinstance(activation_post_process, FixedQParamsFakeQuantize): + warnings.warn( + f"QConfig must specify a FixedQParamsObserver or a FixedQParamsFakeQuantize " + f"for fixed qparams ops, ignoring {qconfig}.\n{suggestion_str}" + ) + return False + if observer.scale != backend_scale_exact_match or observer.zero_point != backend_zero_point_exact_match: + warnings.warn( + f"QConfig fixed scale ({observer.scale}) and zero point ({observer.zero_point}) " + f"do not match the backend's ({backend_scale_exact_match} and {backend_zero_point_exact_match}), " + f"ignoring {qconfig}.\n{suggestion_str}" + ) + return False + return True + + if qconfig is None or dtype_with_constraints.dtype is None: + return True + + activation_post_process_ctr = qconfig.activation if is_activation else qconfig.weight + debug_string = "activation" if is_activation else "weight" + satisfies_constraints = True + if activation_post_process_ctr is not None: + activation_post_process = activation_post_process_ctr() + assert _is_activation_post_process(activation_post_process) + # If dtypes don't match, don't check the activation_post_process and return True early + if activation_post_process.dtype != dtype_with_constraints.dtype: + return True + satisfies_constraints = _activation_post_process_satisfies_dtype_config_constraints( + activation_post_process, dtype_with_constraints, debug_string) + return satisfies_constraints diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/observer.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/observer.py new file mode 100644 index 0000000000000000000000000000000000000000..656372d37555f21aaade7219f1cbcaa408f503d8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/observer.py @@ -0,0 +1,1705 @@ +# mypy: allow-untyped-defs +""" +This module implements observers which are used to collect statistics about +the values observed during calibration (PTQ) or training (QAT). 
+""" + +import re +import warnings +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +from functools import partial +from typing import Any, List, Tuple, Optional, Dict + +import torch +import torch.nn as nn +from torch.ao.quantization.utils import ( + check_min_max_valid, calculate_qmin_qmax, is_per_tensor, is_per_channel, validate_qmin_qmax) + +__all__ = [ + "default_affine_fixed_qparams_observer", + "default_debug_observer", + "default_dynamic_quant_observer", + "default_fixed_qparams_range_0to1_observer", + "default_fixed_qparams_range_neg1to1_observer", + "default_float_qparams_observer", + "default_float_qparams_observer_4bit", + "default_histogram_observer", + "default_observer", + "default_per_channel_weight_observer", + "default_placeholder_observer", + "default_reuse_input_observer", + "default_symmetric_fixed_qparams_observer", + "default_weight_observer", + "get_observer_state_dict", + "load_observer_state_dict", + "per_channel_weight_observer_range_neg_127_to_127", + "weight_observer_range_neg_127_to_127", + "FixedQParamsObserver", + "HistogramObserver", + "MinMaxObserver", + "MovingAverageMinMaxObserver", + "MovingAveragePerChannelMinMaxObserver", + "NoopObserver", + "ObserverBase", + "PerChannelMinMaxObserver", + "PlaceholderObserver", + "RecordingObserver", + "ReuseInputObserver", + "UniformQuantizationObserverBase", +] + + +class _PartialWrapper: + def __init__(self, p): + self.p = p + self.callable_args = {} + + def __call__(self, *args, **keywords): + # call each arg in callable_args and add them partial, then run with keywords + # skip if arg_name in keywords so its possible to overwrite + for arg_name in self.callable_args: + if arg_name not in keywords: + keywords = {**keywords, arg_name: self.callable_args[arg_name]()} + return self.p(*args, **keywords) + + def __repr__(self): + return self.p.__repr__() + self.callable_args.__repr__() + + def with_args(self, **kwargs): + return _with_args(self, **kwargs) + + def with_callable_args(self, **kwargs): + result = _PartialWrapper(p=self.p) + result.callable_args = {**self.callable_args, **kwargs} + return result + + +def _with_args(cls_or_self, **kwargs): + r"""Wrapper that allows creation of class factories. + + This can be useful when there is a need to create classes with the same + constructor arguments, but different instances. Can be used in conjunction with + _callable_args + + Example:: + + >>> # xdoctest: +SKIP("Undefined vars") + >>> Foo.with_args = classmethod(_with_args) + >>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42) + >>> foo_instance1 = foo_builder() + >>> foo_instance2 = foo_builder() + >>> id(foo_instance1) == id(foo_instance2) + False + """ + r = _PartialWrapper(partial(cls_or_self, **kwargs)) + return r + +def _with_callable_args(cls_or_self, **kwargs): + r"""Wrapper that allows creation of class factories args that need to be + called at construction time. + + This can be useful when there is a need to create classes with the same + constructor arguments, but different instances and those arguments should only + be calculated at construction time. 
Can be used in conjunction with _with_args + + Example:: + + >>> # xdoctest: +SKIP("Undefined vars") + >>> Foo.with_callable_args = classmethod(_with_callable_args) + >>> Foo.with_args = classmethod(_with_args) + >>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name="dan") + >>> foo_instance1 = foo_builder() + >>> # wait 50 + >>> foo_instance2 = foo_builder() + >>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time) + False + """ + r = _PartialWrapper(partial(cls_or_self)) + return r.with_callable_args(**kwargs) + + +ABC: Any = ABCMeta("ABC", (object,), {}) # compatible with Python 2 *and* 3: + + +class ObserverBase(ABC, nn.Module): + r"""Base observer Module. + Any observer implementation should derive from this class. + + Concrete observers should follow the same API. In forward, they will update + the statistics of the observed Tensor. And they should provide a + `calculate_qparams` function that computes the quantization parameters given + the collected statistics. + + Args: + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization + or static quantization + """ + + def __init__(self, dtype, is_dynamic=False): + super().__init__() + self.dtype = dtype + self.is_dynamic = is_dynamic + + @abstractmethod + def forward(self, x): + pass + + @abstractmethod + def calculate_qparams(self, **kwargs): + pass + + with_args = classmethod(_with_args) + with_callable_args = classmethod(_with_callable_args) + + +class UniformQuantizationObserverBase(ObserverBase): + r"""Common base for all observers using uniform quantization to calculate + scale and zero_point. + + Args: + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + qscheme: Quantization scheme to be used. + reduce_range: Reduces the range of the quantized data type by 1 bit. + This is sometimes required to avoid instruction overflow. + quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup. + eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. + + .. warning:: + + :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``. + or `torch.int8` or `torch.uint8` + + .. warning:: + + :attr:`qscheme` can only take one of the following options: + + - ``torch.per_tensor_affine`` + - ``torch.per_tensor_symmetric`` + - ``torch.per_channel_affine`` + - ``torch.per_channel_symmetric`` + """ + + # Note: the version is shared by all observer types + # + # Version 1/None + # self + # + # Version 2 (base class only, does not include child class buffers) + # self + # |--- eps : Tensor + # + # Version 3 + # for HistogramObserver only, changed the shape of uninitialized + # min_val and max_val buffers from torch.Size([0]) to torch.Size([]) + # for PerChannelObservers, changed the name of the buffers from min_vals + # to min_val and from max_vals to max_val. 
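+ # The version recorded here is read back in _load_from_state_dict below, e.g. an
+ # unversioned or version 1 state dict gets an `eps` buffer synthesized on load.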
+ _version = 3 + + eps: torch.Tensor + + def __init__( + self, + dtype=torch.quint8, + qscheme=torch.per_tensor_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + factory_kwargs=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs, + ) -> None: + factory_kwargs = torch.nn.factory_kwargs(factory_kwargs) + super().__init__(dtype=dtype, is_dynamic=is_dynamic, **kwargs) + self.qscheme = qscheme + if reduce_range: + warnings.warn( + "Please use quant_min and quant_max to specify the range for observers. \ + reduce_range will be deprecated in a future release of PyTorch." + ) + self.reduce_range = reduce_range + self.register_buffer( + "eps", torch.tensor([eps], **factory_kwargs) + ) + assert self.qscheme in ( + torch.per_tensor_affine, + torch.per_tensor_symmetric, + torch.per_channel_affine, + torch.per_channel_symmetric, + torch.per_channel_affine_float_qparams, + ), "Default Observer only works for per_tensor_affine, \ + per_tensor_symmetric, per_channel_affine, \ + per_channel_symmetric and per_channel_float_qparams quantization scheme" + + _ALLOWED_DTYPES = ( + torch.qint8, + torch.quint8, + torch.quint4x2, + torch.qint32, + torch.int8, + torch.uint8, + torch.int16, + torch.int32, + torch.float8_e5m2, + torch.float8_e4m3fn, + ) + + assert self.dtype in _ALLOWED_DTYPES, f"Default Observer only works for {_ALLOWED_DTYPES} data type" + self.has_customized_qrange = (quant_min is not None) and (quant_max is not None) + if self.has_customized_qrange: + validate_qmin_qmax(quant_min, quant_max) + self.quant_min, self.quant_max = \ + calculate_qmin_qmax(quant_min, quant_max, self.has_customized_qrange, self.dtype, self.reduce_range) + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + + version = local_metadata.get("version", None) + + if version is None or version == 1: + # eps was moved to a buffer in version 2 + eps = torch.tensor([torch.finfo(torch.float32).eps]) + state_dict[prefix + "eps"] = eps + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + @torch.jit.export + def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None: + r"""Validates that the user-specified quantization range is properly initialized + and within the given bound supported by the observer dtype. + + To accommodate lower-bit quantization with respect to the existing torch.qint8 and + torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing + in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax + values are used to calculate static estimates of the scale and zero point for aggressive lower-bit + fake quantization. These estimates are compared against parameters learned through backpropagation. + The related literatures for scale and zero point via backpropagation are as follows: + + Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS + Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf + """ + # The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted + # based on whether quantization range is reduced and the datatype (signed/unsigned) used by the observer. + assert ( + quant_min <= 0 <= quant_max + ), "Used-specified quantization range must include 0." 
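+ # For example, (quant_min, quant_max) = (-8, 7) is a valid signed 4-bit range, while
+ # (1, 15) fails the check above because 0 would not be representable.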
+ assert ( + quant_min < quant_max + ), "qmin must be strictly less than qmax for user-specified quantization range." + + @torch.jit.export + def _calculate_qparams( + self, min_val: torch.Tensor, max_val: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Calculates the quantization parameters, given min and max + value tensors. Works for both per tensor and per channel cases + + Args: + min_val: Minimum values per channel + max_val: Maximum values per channel + + Returns: + scales: Scales tensor of shape (#channels,) + zero_points: Zero points tensor of shape (#channels,) + """ + # Functionally equivalent to 'determine_qparams' in utils.py. Observers must be torchscriptable however and qscheme + # as far as I can tell is not allowed to passed as a parameter in torchscript functions. This makes refactoring observer + # to use this utility a massive pain and very gross. For now Im opting just to duplicate as this code + # seems unlikey to change (last update over 1 year ago) and when torchscript is fully deprecated we can refactor. + # TODO(jakeszwe, jerryzh168) + if not check_min_max_valid(min_val, max_val): + return torch.tensor([1.0], device=min_val.device.type), torch.tensor([0], device=min_val.device.type) + + quant_min, quant_max = self.quant_min, self.quant_max + min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) + max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) + + device = min_val_neg.device + scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device) + zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device) + + if ( + self.qscheme == torch.per_tensor_symmetric + or self.qscheme == torch.per_channel_symmetric + ): + max_val_pos = torch.max(-min_val_neg, max_val_pos) + scale = max_val_pos / (float(quant_max - quant_min) / 2) + scale = torch.max(scale, self.eps) + if self.dtype in [torch.quint8, torch.uint8]: + if self.has_customized_qrange: + # When customized quantization range is used, down-rounded midpoint of the range is chosen. + zero_point = zero_point.new_full( + zero_point.size(), (quant_min + quant_max) // 2 + ) + else: + zero_point = zero_point.new_full(zero_point.size(), 128) + elif self.qscheme == torch.per_channel_affine_float_qparams: + scale = (max_val - min_val) / float(quant_max - quant_min) + scale = torch.where(scale > self.eps, scale, torch.ones_like(scale)) + # We use the quantize function + # xq = Round(Xf * inv_scale + zero_point), + # setting zero_point to (-1 * min *inv_scale) we get + # Xq = Round((Xf - min) * inv_scale) + zero_point = -1 * min_val / scale + else: + scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min) + scale = torch.max(scale, self.eps) + zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int) + zero_point = torch.clamp(zero_point, quant_min, quant_max) + + # For scalar values, cast them to Tensors of size 1 to keep the shape + # consistent with default values in FakeQuantize. 
+ if len(scale.shape) == 0: + # TODO: switch to scale.item() after adding JIT support + scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device) + if len(zero_point.shape) == 0: + # TODO: switch to zero_point.item() after adding JIT support + zero_point = torch.tensor( + [int(zero_point)], dtype=zero_point.dtype, device=device + ) + if self.qscheme == torch.per_channel_affine_float_qparams: + zero_point = torch.tensor( + [float(zero_point)], dtype=zero_point.dtype, device=device + ) + + return scale, zero_point + + @torch.jit.export + def reset_min_max_vals(self): + raise NotImplementedError("Cannot reset min/max values in the given observer.") + + +# Originally, this class was called `_ObserverBase`. Keeping the old name around +# for backwards compatibility. +# TODO(after v1.13): delete this +_ObserverBase = UniformQuantizationObserverBase + + +class MinMaxObserver(UniformQuantizationObserverBase): + r"""Observer module for computing the quantization parameters based on the + running min and max values. + + This observer uses the tensor min/max statistics to compute the quantization + parameters. The module records the running minimum and maximum of incoming + tensors, and uses this statistic to compute the quantization parameters. + + Args: + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup. + eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. + + Given running min/max as :math:`x_\text{min}` and :math:`x_\text{max}`, + scale :math:`s` and zero point :math:`z` are computed as: + + The running minimum/maximum :math:`x_\text{min/max}` is computed as: + + .. math:: + + \begin{array}{ll} + x_\text{min} &= \begin{cases} + \min(X) & \text{if~}x_\text{min} = \text{None} \\ + \min\left(x_\text{min}, \min(X)\right) & \text{otherwise} + \end{cases}\\ + x_\text{max} &= \begin{cases} + \max(X) & \text{if~}x_\text{max} = \text{None} \\ + \max\left(x_\text{max}, \max(X)\right) & \text{otherwise} + \end{cases}\\ + \end{array} + + where :math:`X` is the observed tensor. + + The scale :math:`s` and zero point :math:`z` are then computed as: + + .. math:: + + \begin{aligned} + \text{if Symmetric:}&\\ + &s = 2 \max(|x_\text{min}|, x_\text{max}) / + \left( Q_\text{max} - Q_\text{min} \right) \\ + &z = \begin{cases} + 0 & \text{if dtype is qint8} \\ + 128 & \text{otherwise} + \end{cases}\\ + \text{Otherwise:}&\\ + &s = \left( x_\text{max} - x_\text{min} \right ) / + \left( Q_\text{max} - Q_\text{min} \right ) \\ + &z = Q_\text{min} - \text{round}(x_\text{min} / s) + \end{aligned} + + where :math:`Q_\text{min}` and :math:`Q_\text{max}` are the minimum and + maximum of the quantized data type. + + .. warning:: :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``. + + .. note:: If the running minimum equals to the running maximum, the scale + and zero_point are set to 1.0 and 0. 
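+
+ A minimal usage sketch (illustrative values, assuming the default quint8 range of
+ 0 to 255)::
+
+ >>> # xdoctest: +SKIP
+ >>> obs = MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_tensor_affine)
+ >>> _ = obs(torch.tensor([-1.0, 0.0, 3.0]))
+ >>> scale, zero_point = obs.calculate_qparams()
+ >>> # scale is roughly 4 / 255 and zero_point is 64, per the affine formula above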
+ """ + min_val: torch.Tensor + max_val: torch.Tensor + + def __init__( + self, + dtype=torch.quint8, + qscheme=torch.per_tensor_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + factory_kwargs=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs, + ) -> None: + if not is_per_tensor(qscheme): + raise NotImplementedError( + "MinMaxObserver's qscheme only support torch.per_tensor_symmetric \ + and torch.per_tensor_affine." + ) + # TODO: MinMaxObserver by itself doesn't support dynamic quantization, but + # if it's inherited by MovingAverageObserver, and averaging_constant is 1, it + # supports dynamic quantization, we may need to better error checking here + + # For x86 quantized kernels, we need to ensure that the vpmaddubsw + # instruction does not overflow. We allow for a reduce_range argument to + # observers that reduces the quantized range to (0,127) or (-64, 63). + # For more details see aten/src/ATen/native/quantized/cpu/qconv.cpp + # This is not an optimal choice for non x86 backends as it loses a bit + # of precision for activations. + super().__init__( + dtype=dtype, + qscheme=qscheme, + reduce_range=reduce_range, + quant_min=quant_min, + quant_max=quant_max, + factory_kwargs=factory_kwargs, + eps=eps, + is_dynamic=is_dynamic, + **kwargs, + ) + factory_kwargs = torch.nn.factory_kwargs(factory_kwargs) + self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs)) + self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs)) + if ( + self.qscheme == torch.per_tensor_symmetric + and self.reduce_range + and self.dtype == torch.quint8 + ): + raise NotImplementedError( + "Cannot reduce range for symmetric \ + quantization for quint8" + ) + + def forward(self, x_orig): + r"""Records the running minimum and maximum of ``x``.""" + if x_orig.numel() == 0: + return x_orig + x = x_orig.detach() # avoid keeping autograd tape + x = x.to(self.min_val.dtype) + min_val_cur, max_val_cur = torch.aminmax(x) + min_val = torch.min(min_val_cur, self.min_val) + max_val = torch.max(max_val_cur, self.max_val) + self.min_val.copy_(min_val) + self.max_val.copy_(max_val) + return x_orig + + @torch.jit.export + def calculate_qparams(self): + r"""Calculates the quantization parameters.""" + return self._calculate_qparams(self.min_val, self.max_val) + + @torch.jit.export + def extra_repr(self): + return f"min_val={self.min_val}, max_val={self.max_val}" + + @torch.jit.export + def reset_min_max_vals(self): + """Resets the min/max values.""" + self.min_val.copy_(torch.tensor(float("inf"))) + self.max_val.copy_(torch.tensor(float("-inf"))) + +class MovingAverageMinMaxObserver(MinMaxObserver): + r"""Observer module for computing the quantization parameters based on the + moving average of the min and max values. + + This observer computes the quantization parameters based on the moving + averages of minimums and maximums of the incoming tensors. The module + records the average minimum and maximum of incoming tensors, and uses this + statistic to compute the quantization parameters. + + Args: + averaging_constant: Averaging constant for min/max. + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup. 
+ eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. + + The moving average min/max is computed as follows + + .. math:: + + \begin{array}{ll} + x_\text{min} = \begin{cases} + \min(X) & \text{if~}x_\text{min} = \text{None} \\ + (1 - c) x_\text{min} + c \min(X) & \text{otherwise} + \end{cases}\\ + x_\text{max} = \begin{cases} + \max(X) & \text{if~}x_\text{max} = \text{None} \\ + (1 - c) x_\text{max} + c \max(X) & \text{otherwise} + \end{cases}\\ + \end{array} + + where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is + is the incoming tensor, and :math:`c` is the ``averaging_constant``. + + The scale and zero point are then computed as in + :class:`~torch.ao.quantization.observer.MinMaxObserver`. + + .. note:: Only works with ``torch.per_tensor_affine`` quantization scheme. + + .. note:: If the running minimum equals to the running maximum, the scale + and zero_point are set to 1.0 and 0. + """ + + def __init__( + self, + averaging_constant=0.01, + dtype=torch.quint8, + qscheme=torch.per_tensor_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs + ) -> None: + if not is_per_tensor(qscheme): + raise NotImplementedError( + f"MovingAverageMinMaxObserver's qscheme only support \ + torch.per_tensor_symmetric and torch.per_tensor_affine. \ + but got: {qscheme}" + ) + self.averaging_constant = averaging_constant + if is_dynamic and self.averaging_constant != 1: + raise NotImplementedError( + "MovingAverageMinMaxObserver doesn't support dynamic quantization for " + f"averaging constant of {self.averaging_constant}" + ) + super().__init__( + dtype=dtype, + qscheme=qscheme, + reduce_range=reduce_range, + quant_min=quant_min, + quant_max=quant_max, + eps=eps, + is_dynamic=is_dynamic, + **kwargs + ) + + def forward(self, x_orig): + if x_orig.numel() == 0: + return x_orig + x = x_orig.detach() # avoid keeping autograd tape + x = x.to(self.min_val.dtype) + min_val = self.min_val + max_val = self.max_val + if min_val == float("inf") and max_val == float("-inf"): + min_val, max_val = torch.aminmax(x) + else: + min_val_cur, max_val_cur = torch.aminmax(x) + min_val = min_val + self.averaging_constant * (min_val_cur - min_val) + max_val = max_val + self.averaging_constant * (max_val_cur - max_val) + self.min_val.copy_(min_val) + self.max_val.copy_(max_val) + return x_orig + + +class PerChannelMinMaxObserver(UniformQuantizationObserverBase): + r"""Observer module for computing the quantization parameters based on the + running per channel min and max values. + + This observer uses the tensor min/max statistics to compute the per channel + quantization parameters. The module records the running minimum and maximum + of incoming tensors, and uses this statistic to compute the quantization + parameters. + + Args: + ch_axis: Channel axis + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup. + eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. 
+ + The quantization parameters are computed the same way as in + :class:`~torch.ao.quantization.observer.MinMaxObserver`, with the difference + that the running min/max values are stored per channel. + Scales and zero points are thus computed per channel as well. + + .. note:: If the running minimum equals to the running maximum, the scales + and zero_points are set to 1.0 and 0. + """ + min_val: torch.Tensor + max_val: torch.Tensor + + def __init__( + self, + ch_axis=0, + dtype=torch.quint8, + qscheme=torch.per_channel_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + factory_kwargs=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs, + ) -> None: + if not is_per_channel(qscheme): + raise NotImplementedError( + "PerChannelMinMaxObserver's qscheme only support \ + torch.per_channel_symmetric, torch.per_channel_affine and torch.per_channel_affine_float_qparams." + ) + if is_dynamic: + raise NotImplementedError( + "PerChannelMinMaxObserver doesn't support dynamic quantization" + ) + super().__init__( + dtype=dtype, + qscheme=qscheme, + reduce_range=reduce_range, + quant_min=quant_min, + quant_max=quant_max, + factory_kwargs=factory_kwargs, + eps=eps, + is_dynamic=is_dynamic, + **kwargs, + ) + factory_kwargs = torch.nn.factory_kwargs(factory_kwargs) + self.ch_axis = ch_axis + self.register_buffer("min_val", torch.tensor([], **factory_kwargs)) + self.register_buffer("max_val", torch.tensor([], **factory_kwargs)) + if ( + self.qscheme == torch.per_channel_symmetric + and self.reduce_range + and self.dtype == torch.quint8 + ): + raise NotImplementedError( + "Cannot reduce range for symmetric quantization for quint8" + ) + + def forward(self, x_orig): + return self._forward(x_orig) + + def _forward(self, x_orig): + if x_orig.numel() == 0: + return x_orig + x = x_orig.detach() # avoid keeping autograd tape + min_val = self.min_val + max_val = self.max_val + x_dim = x.size() + + new_axis_list = [i for i in range(len(x_dim))] # noqa: C416 + new_axis_list[self.ch_axis] = 0 + new_axis_list[0] = self.ch_axis + y = x.permute(new_axis_list) + # Need to match dtype of min/max because the updates to buffers + # are done in place and types need to match for comparisons + y = y.to(self.min_val.dtype) + y = torch.flatten(y, start_dim=1) + if min_val.numel() == 0 or max_val.numel() == 0: + min_val, max_val = torch.aminmax(y, dim=1) + else: + min_val_cur, max_val_cur = torch.aminmax(y, dim=1) + min_val = torch.min(min_val_cur, min_val) + max_val = torch.max(max_val_cur, max_val) + self.min_val.resize_(min_val.shape) + self.max_val.resize_(max_val.shape) + self.min_val.copy_(min_val) + self.max_val.copy_(max_val) + return x_orig + + @torch.jit.export + def calculate_qparams(self): + return self._calculate_qparams(self.min_val, self.max_val) + + def extra_repr(self): + return f"min_val={self.min_val}, max_val={self.max_val}" + + def _load_from_state_dict( + self, + state_dict: Dict[str, Any], + prefix: str, + local_metadata: Dict[str, torch.Tensor], + strict: bool, + missing_keys: List[str], + unexpected_keys: List[str], + error_msgs: List[str], + ): + version = local_metadata.get("version", None) + if version is not None and version < 3: + local_state = ["min_vals", "max_vals"] + expected_min_name = "min_vals" + expected_max_name = "max_vals" + else: + local_state = ["min_val", "max_val"] + expected_min_name = "min_val" + expected_max_name = "max_val" + for name in local_state: + key = prefix + name + if key in state_dict: + val = state_dict[key] + # Custom handling to 
allow loading min_val or max_val + # of size N into uninitialized buffers of size 0. The + # buffers are resized here, and the values are copied in + # the default state_dict loading code of the parent. + if name == expected_min_name: + self.min_val.resize_(val.shape) + elif name == expected_max_name: + self.max_val.resize_(val.shape) + else: + warnings.warn(f"Observer load_from_state_dict got unexpected name {name}") + # For torchscript module we need to update the attributes here since we do not + # call the `_load_from_state_dict` function defined module.py + if torch.jit.is_scripting(): + if name == expected_min_name: + self.min_val.copy_(val) + elif name == expected_max_name: + self.max_val.copy_(val) + else: + warnings.warn(f"Observer load_from_state_dict got unexpected name {name}") + elif strict: + missing_keys.append(key) + + if not torch.jit.is_scripting(): + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + False, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def _load_from_state_dict_script( + self, + state_dict: Dict[str, Any], + prefix: str, + local_metadata: Dict[str, torch.Tensor], + strict: bool, + missing_keys: List[str], + unexpected_keys: List[str], + error_msgs: List[str], + ): + + self._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + @torch.jit.export + def reset_min_max_vals(self): + """Resets the min/max values.""" + # This used to be torch.ones but that does not work because + # JIT compiler can optimize it via common subexpression elimination + # in which case both min_val and max_val point to the same tensor. + self.min_val = torch.rand(0, ) + self.max_val = torch.rand(0, ) + + +class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver): + r"""Observer module for computing the quantization parameters based on the + running per channel min and max values. + + This observer uses the tensor min/max statistics to compute the per channel + quantization parameters. The module records the running minimum and maximum + of incoming tensors, and uses this statistic to compute the quantization + parameters. + + Args: + averaging_constant: Averaging constant for min/max. + ch_axis: Channel axis + dtype: Quantized data type + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup. + quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup. + eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. + + The quantization parameters are computed the same way as in + :class:`~torch.ao.quantization.observer.MovingAverageMinMaxObserver`, with the + difference that the running min/max values are stored per channel. + Scales and zero points are thus computed per channel as well. + + .. note:: If the running minimum equals to the running maximum, the scales + and zero_points are set to 1.0 and 0. 
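+
+    A minimal usage sketch (the tensor shape and the calibration loop below are
+    illustrative only, not part of this module)::
+
+        import torch
+        from torch.ao.quantization.observer import MovingAveragePerChannelMinMaxObserver
+
+        obs = MovingAveragePerChannelMinMaxObserver(averaging_constant=0.01, ch_axis=0)
+        for _ in range(4):
+            obs(torch.randn(8, 16))  # update per-channel running min/max along dim 0
+        scale, zero_point = obs.calculate_qparams()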
+ """ + + def __init__( + self, + averaging_constant=0.01, + ch_axis=0, + dtype=torch.quint8, + qscheme=torch.per_channel_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs + ) -> None: + if not is_per_channel(qscheme): + raise NotImplementedError( + "MovingAveragePerChannelMinMaxObserver's qscheme only support \ + torch.per_channel_symmetric, torch.per_channel_affine and torch.per_channel_affine_float_qparams." + ) + if is_dynamic: + raise NotImplementedError( + "MovingAveragePerChannelMinMaxObserver doesn't support dynamic quantization" + ) + super().__init__( + ch_axis=ch_axis, + dtype=dtype, + qscheme=qscheme, + reduce_range=reduce_range, + quant_min=quant_min, + quant_max=quant_max, + eps=eps, + is_dynamic=is_dynamic, + **kwargs + ) + self.averaging_constant = averaging_constant + + def forward(self, x_orig): + if x_orig.numel() == 0: + return x_orig + x = x_orig.detach() # avoid keeping autograd tape + x = x.to(self.min_val.dtype) + min_val = self.min_val + max_val = self.max_val + x_dim = x.size() + + new_axis_list = [i for i in range(len(x_dim))] # noqa: C416 + new_axis_list[self.ch_axis] = 0 + new_axis_list[0] = self.ch_axis + y = x.permute(new_axis_list) + y = torch.flatten(y, start_dim=1) + if min_val.numel() == 0 or max_val.numel() == 0: + min_val, max_val = torch.aminmax(y, dim=1) + else: + min_val_cur, max_val_cur = torch.aminmax(y, dim=1) + min_val = min_val + self.averaging_constant * (min_val_cur - min_val) + max_val = max_val + self.averaging_constant * (max_val_cur - max_val) + self.min_val.resize_(min_val.shape) + self.max_val.resize_(max_val.shape) + self.min_val.copy_(min_val) + self.max_val.copy_(max_val) + return x_orig + + +class HistogramObserver(UniformQuantizationObserverBase): + r""" + The module records the running histogram of tensor values along with + min/max values. ``calculate_qparams`` will calculate scale and zero_point. + + Args: + bins: Number of bins to use for the histogram + upsample_rate: Factor by which the histograms are upsampled, this is + used to interpolate histograms with varying ranges across observations + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`. + + The scale and zero point are computed as follows: + + 1. Create the histogram of the incoming inputs. + The histogram is computed continuously, and the ranges per bin change + with every new tensor observed. + 2. Search the distribution in the histogram for optimal min/max values. + The search for the min/max values ensures the minimization of the + quantization error with respect to the floating point model. + 3. Compute the scale and zero point the same way as in the + :class:`~torch.ao.quantization.MinMaxObserver` + """ + histogram: torch.Tensor + min_val: torch.Tensor + max_val: torch.Tensor + + def __init__( + self, + bins: int = 2048, + upsample_rate: int = 128, + dtype: torch.dtype = torch.quint8, + qscheme=torch.per_tensor_affine, + reduce_range=False, + quant_min=None, + quant_max=None, + factory_kwargs=None, + eps=torch.finfo(torch.float32).eps, + is_dynamic=False, + **kwargs, + ) -> None: + if not is_per_tensor(qscheme): + raise NotImplementedError( + "HistogramObserver's qscheme only support torch.per_tensor_symmetric \ + and torch.per_tensor_affine." 
+ ) + if is_dynamic: + raise NotImplementedError( + "HistogramObserver doesn't support dynamic quantization" + ) + # bins: The number of bins used for histogram calculation. + super().__init__( + dtype=dtype, + qscheme=qscheme, + reduce_range=reduce_range, + quant_min=quant_min, + quant_max=quant_max, + factory_kwargs=factory_kwargs, + eps=eps, + is_dynamic=is_dynamic, + **kwargs + ) + factory_kwargs = torch.nn.factory_kwargs(factory_kwargs) + self.bins = bins + self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs)) + self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs)) + self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs)) + self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits + self.upsample_rate = upsample_rate + + def _get_norm( + self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor + ) -> torch.Tensor: + r""" + Compute the norm of the values uniformaly distributed between + delta_begin and delta_end. + Currently only L2 norm is supported. + + norm = density * (integral_{begin, end} x^2) + = density * (end^3 - begin^3) / 3 + """ + norm = ( + delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin + ) / 3 + return density * norm + + def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int): + r""" + Compute the quantization error if we use start_bin to end_bin as the + min and max to do the quantization. + """ + bin_width = (self.max_val.item() - self.min_val.item()) / self.bins + + dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins + if dst_bin_width == 0.0: + return 0.0 + + src_bin = torch.arange(self.bins, device=self.histogram.device) + # distances from the beginning of first dst_bin to the beginning and + # end of src_bin + src_bin_begin = (src_bin - next_start_bin) * bin_width + src_bin_end = src_bin_begin + bin_width + + # which dst_bins the beginning and end of src_bin belong to? + dst_bin_of_begin = torch.clamp( + torch.div(src_bin_begin, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1 + ) + dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width + + dst_bin_of_end = torch.clamp( + torch.div(src_bin_end, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1 + ) + density = self.histogram / bin_width + + norm = torch.zeros(self.bins, device=self.histogram.device) + + delta_begin = src_bin_begin - dst_bin_of_begin_center + delta_end = dst_bin_width / 2 + norm += self._get_norm(delta_begin, + torch.ones(self.bins, device=self.histogram.device) * delta_end, + density) + + norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm( + torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density + ) + + dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2 + + delta_begin = -dst_bin_width / 2 + delta_end = src_bin_end - dst_bin_of_end_center + norm += self._get_norm(torch.tensor(delta_begin), delta_end, density) + + return norm.sum().item() + + def _non_linear_param_search(self) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Non-linear parameter search. + + An approximation for L2 error minimization for selecting min/max. + By selecting new min/max, we filter out outliers in input distribution. 
+ This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in + caffe2/quantization/server/norm_minimization.cc + """ + assert self.histogram.size()[0] == self.bins, "bins mismatch" + bin_width = (self.max_val - self.min_val) / self.bins + + # cumulative sum + total = torch.sum(self.histogram).item() + cSum = torch.cumsum(self.histogram, dim=0) + + stepsize = 1e-5 # granularity + alpha = 0.0 # lower bound + beta = 1.0 # upper bound + start_bin = 0 + end_bin = self.bins - 1 + norm_min = float("inf") + + while alpha < beta: + # Find the next step + next_alpha = alpha + stepsize + next_beta = beta - stepsize + + # find the left and right bins between the quantile bounds + l = start_bin + r = end_bin + while l < end_bin and cSum[l] < next_alpha * total: + l = l + 1 + while r > start_bin and cSum[r] > next_beta * total: + r = r - 1 + + # decide the next move + next_start_bin = start_bin + next_end_bin = end_bin + if (l - start_bin) > (end_bin - r): + # move the start bin + next_start_bin = l + alpha = next_alpha + else: + # move the end bin + next_end_bin = r + beta = next_beta + + if next_start_bin == start_bin and next_end_bin == end_bin: + continue + + # calculate the quantization error using next_start_bin and next_end_bin + norm = self._compute_quantization_error(next_start_bin, next_end_bin) + + if norm > norm_min: + break + norm_min = norm + start_bin = next_start_bin + end_bin = next_end_bin + + new_min = self.min_val + bin_width * start_bin + new_max = self.min_val + bin_width * (end_bin + 1) + return new_min, new_max + + def _adjust_min_max( + self, combined_min: torch.Tensor, combined_max: torch.Tensor, upsample_rate: int + ) -> Tuple[torch.Tensor, torch.Tensor, int, int]: + # We ensure that: + # (combined_max - combined_min)/(downsample_rate*Nbins) = (max - min)/(upsample_rate*Nbins) + # This allows us to have a common grid of resolution s, where we can align + # the input histogram + # start_idx maps min_val to the histogram bin index. + + # Compute the width of histogram bins is a straightforward solution, where + # hist_bin_width = (self.max_val - self.min_val) / (self.bins * upsample_rate) + # Underflow happens if the numerator is close to the smallest positive subnormal number of FP32 + # Therefore, we avoid such division operation. + downsample_rate = int( + torch.ceil( + ((combined_max - combined_min) / (self.max_val - self.min_val)) * upsample_rate + ).item() + ) + e = downsample_rate / upsample_rate * (self.max_val - self.min_val) - (combined_max - combined_min) + start_idx = int( + torch.round((self.min_val - combined_min) / (self.max_val - self.min_val) * self.bins * upsample_rate).item() + ) + combined_max = combined_max + e + return combined_min, combined_max, downsample_rate, start_idx + + def _combine_histograms( + self, + orig_hist: torch.Tensor, + new_hist: torch.Tensor, + upsample_rate: int, + downsample_rate: int, + start_idx: int, + Nbins: int, + ) -> torch.Tensor: + # First up-sample the histogram with new data by a factor of L + # This creates an approximate probability density thats piecewise constant + upsampled_histogram = new_hist.repeat_interleave(upsample_rate) + # Now insert the upsampled histogram into the output + # histogram, which is initialized with zeros. 
+ # The offset at which the histogram is introduced is determined + # by the start index as the output histogram can cover a wider range + histogram_with_output_range = torch.zeros( + (Nbins * downsample_rate), device=orig_hist.device + ) + histogram_with_output_range[ + start_idx : Nbins * upsample_rate + start_idx + ] = upsampled_histogram + # Compute integral histogram, double precision is needed to ensure + # that there are no overflows + integral_histogram = torch.cumsum( + histogram_with_output_range, 0, dtype=torch.double + )[downsample_rate - 1 :: downsample_rate] + # Finally perform interpolation + shifted_integral_histogram = torch.zeros((Nbins), device=orig_hist.device) + shifted_integral_histogram[1:Nbins] = integral_histogram[0:-1] + interpolated_histogram = ( + integral_histogram - shifted_integral_histogram + ) / upsample_rate + orig_hist = orig_hist + interpolated_histogram.to(torch.float) + return orig_hist + + def reset_histogram(self, x: torch.Tensor, min_val: torch.Tensor, max_val: torch.Tensor) -> None: + self.min_val.resize_(min_val.shape) + self.min_val.copy_(min_val) + self.max_val.resize_(max_val.shape) + self.max_val.copy_(max_val) + assert ( + min_val.numel() == 1 and max_val.numel() == 1 + ), "histogram min/max values must be scalar." + torch.histc( + x, self.bins, min=min_val, max=max_val, out=self.histogram # type: ignore[arg-type] + ) + + def forward(self, x_orig: torch.Tensor) -> torch.Tensor: + if x_orig.numel() == 0: + return x_orig + x = x_orig.detach() + x_min, x_max = torch.aminmax(x) + # want to ignore torch.inf since we don't actually + # want to make our quantization range infinite + # and in practice those values will be clamped + if x_min == -torch.inf or x_max == torch.inf: + warnings.warn("torch.inf detected in input tensor, ignoring input") + x = x[x.abs() != torch.inf] + if x.numel() == 0: + return x_orig + x_min, x_max = torch.aminmax(x) + min_val = self.min_val + max_val = self.max_val + same_values = min_val.item() == max_val.item() + # When (max_val - min_val) is very small, downsample_rate will be large. + # This can cause OOM issue in the allocation of histogram_with_output_range tensor. + close_values = (self.max_val - self.min_val) < 1e-6 + is_uninitialized = min_val == float("inf") and max_val == float("-inf") + if is_uninitialized or same_values or close_values: + min_val, max_val = x_min, x_max + self.reset_histogram(x, min_val, max_val) + else: + new_min, new_max = x_min, x_max + combined_min = torch.min(new_min, min_val) + combined_max = torch.max(new_max, max_val) + # combine the existing histogram and new histogram into 1 histogram + # We do this by first upsampling the histogram to a dense grid + # and then downsampling the histogram efficiently + ( + combined_min, + combined_max, + downsample_rate, + start_idx, + ) = self._adjust_min_max(combined_min, combined_max, self.upsample_rate) + assert ( + combined_min.numel() == 1 and combined_max.numel() == 1 + ), "histogram min/max values must be scalar." 
+ + # TODO: For some reason, this is required for it to pass torchscript test + # combined_min and combined_max should already have requires_grad set to False + combined_min, combined_max = combined_min.detach(), combined_max.detach() + + combined_histogram = torch.histc( + x, self.bins, min=combined_min, max=combined_max # type: ignore[arg-type] + ) + if combined_min == min_val and combined_max == max_val: + combined_histogram += self.histogram + else: + MAX_HISTOGRAM_SIZE = 1e9 # 1 GB + histogram_size = self.bins * downsample_rate * 4 + if histogram_size > MAX_HISTOGRAM_SIZE: + warnings.warn( + "Fail to combine histograms. Fall back to reset histogram." + ) + self.reset_histogram(x, x_min, x_max) + else: + combined_histogram = self._combine_histograms( + combined_histogram, + self.histogram, + self.upsample_rate, + downsample_rate, + start_idx, + self.bins, + ) + self.histogram.detach_().resize_(combined_histogram.shape) + self.histogram.copy_(combined_histogram) + self.min_val.detach_().resize_(combined_min.shape) + self.min_val.copy_(combined_min) + self.max_val.detach_().resize_(combined_max.shape) + self.max_val.copy_(combined_max) + + return x_orig + + @torch.jit.export + def calculate_qparams(self): + is_uninitialized = self.min_val == float("inf") and self.max_val == float( + "-inf" + ) + if is_uninitialized: + warnings.warn( + "must run observer before calling calculate_qparams.\ + Returning default scale and zero point " + ) + return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor([0], device=self.min_val.device.type) + assert self.bins == len(self.histogram), ( + "The number of bins in histogram should be equal to the number of bins " + "supplied while making this observer" + ) + + new_min, new_max = self._non_linear_param_search() + + return self._calculate_qparams(new_min, new_max) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + destination[prefix + "min_val"] = self.min_val + destination[prefix + "max_val"] = self.max_val + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 3: + # if min_val and max_val are not initialized, update their shape + # to account for the differences between v2 and v3 + min_val_name, max_val_name = prefix + "min_val", prefix + "max_val" + if min_val_name in state_dict: + if state_dict[min_val_name].shape == torch.Size([0]): + state_dict[min_val_name] = torch.tensor(float("inf")) + if max_val_name in state_dict: + if state_dict[max_val_name].shape == torch.Size([0]): + state_dict[max_val_name] = torch.tensor(float("-inf")) + + local_state = ["min_val", "max_val"] + for name in local_state: + key = prefix + name + if key in state_dict: + val = state_dict[key] + setattr(self, name, val) + elif strict: + missing_keys.append(key) + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def extra_repr(self): + return f"min_val={self.min_val}, max_val={self.max_val}" + + +class FixedQParamsObserver(ObserverBase): + r""" + Observer that simulates quantize and dequantize with fixed + quantization parameters in training time. Only per tensor + quantization is supported. 
+ + Args: + `scale` (float): fixed scale for the observer + `zero_point` (int): fixed zero point for the observer + `dtype`, `qscheme`, `quant_min`, `quant_max` + """ + + scale: torch.Tensor + zero_point: torch.Tensor + + def __init__( + self, + scale, + zero_point, + dtype=torch.quint8, + qscheme=torch.per_tensor_affine, + quant_min=0, + quant_max=255, + is_dynamic=False, + **kwargs, + ): + if is_dynamic: + raise NotImplementedError( + "FixedQParamsObserver doesn't support dynamic quantization" + ) + super().__init__(dtype=dtype, is_dynamic=is_dynamic, **kwargs) + self.quant_min = quant_min + self.quant_max = quant_max + self.register_buffer('scale', torch.tensor([scale], dtype=torch.float)) + self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.int)) + self.dtype = dtype + self.qscheme = qscheme + + def forward(self, X): + return X + + @torch.jit.export + def calculate_qparams(self): + return self.scale, self.zero_point + + +class PlaceholderObserver(ObserverBase): + r""" + Observer that doesn't do anything and just passes its configuration to the + quantized module's ``.from_float()``. + + Can be used for quantization to float16 which doesn't require determining + ranges. + + Args: + dtype: dtype argument to the `quantize` node needed to implement the + reference model spec. + quant_min: minimum value in quantized domain (TODO: align behavior with other observers) + quant_max: maximum value in quantized domain + custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation + (Can be used in Graph Mode Passes for special case ops). + compute_dtype (deprecated): if set, marks the future quantize function to use + dynamic quantization instead of static quantization. + This field is deprecated, use `is_dynamic=True` instead. + is_dynamic: if True, the `quantize` function in the reference model + representation taking stats from this observer instance will + use dynamic quantization. + """ + + def __init__( + self, dtype=torch.float32, custom_op_name="", compute_dtype=None, + quant_min=None, quant_max=None, qscheme=None, eps=None, + is_dynamic=False, + ) -> None: + super().__init__(dtype=dtype, is_dynamic=is_dynamic) + if qscheme is None: + qscheme = torch.per_tensor_affine + if eps is None: + eps = torch.finfo(torch.float32).eps + + # dtype of input of the target operator, e.g. for dynamic quantization + # ops, the dtype will be float32 + self.dtype = dtype + self.qscheme = qscheme + self.quant_min = quant_min + self.quant_max = quant_max + self.eps = eps + self.custom_op = custom_op_name + # used for configuration of computation type for dynamic quantization + if compute_dtype: + is_dynamic = True + warnings.warn( + "Please use `is_dynamic` instead of `compute_dtype`. \ + `compute_dtype` will be deprecated in a future release \ + of PyTorch." + ) + + def forward(self, x): + return x + + @torch.jit.export + def extra_repr(self): + return f"dtype={self.dtype}, is_dynamic={self.is_dynamic}" + + @torch.jit.export + def calculate_qparams(self): + raise Exception( # noqa: TRY002 + "calculate_qparams should not be called for PlaceholderObserver" + ) + + +class RecordingObserver(ObserverBase): + r""" + The module is mainly for debug and records the tensor values during runtime. 
+ + Args: + dtype: Quantized data type + qscheme: Quantization scheme to be used + reduce_range: Reduces the range of the quantized data type by 1 bit + """ + __annotations__ = {"tensor_val": List[Optional[torch.Tensor]]} + + def __init__(self, dtype=torch.quint8): + super().__init__(dtype=dtype, is_dynamic=False) # type: ignore[call-arg] + self.tensor_val = [] + + def forward(self, x): + self.tensor_val.append(x.clone()) + return x + + @torch.jit.export + def calculate_qparams(self): + raise Exception("calculate_qparams should not be called for RecordingObserver") # noqa: TRY002 + + @torch.jit.export + def get_tensor_value(self): + return self.tensor_val + + +class NoopObserver(ObserverBase): + r""" + Observer that doesn't do anything and just passes its configuration to the + quantized module's ``.from_float()``. + + Primarily used for quantization to float16 which doesn't require determining + ranges. + + Args: + dtype: Quantized data type + custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation + (Can be used in Graph Mode Passes for special case ops). + """ + + def __init__(self, dtype=torch.float16, custom_op_name="") -> None: + super().__init__(dtype=dtype, is_dynamic=False) + self.dtype = dtype + self.custom_op = custom_op_name + + def forward(self, x): + return x + + @torch.jit.export + def calculate_qparams(self): + raise Exception("calculate_qparams should not be called for NoopObserver") # noqa: TRY002 + +class ReuseInputObserver(ObserverBase): + r""" This observer is used when we want to reuse the observer from the operator + that produces the input Tensor, typically used for operators like reshape, e.g. + ``` + x0 = ... + x1 = x0.reshape() + ``` + if we configure x0 to be observed by some observer, let's say MinMaxObserver, + and reshape is configured with ReuseInputObserver, we'll reuse the observer instance + for x0 for x1 (output of reshape). If x0 is not observed, we also won't observe x1. + + Note: this is only enabled in FX Graph Mode Quantization + """ + def __init__(self): + super().__init__(torch.quint8, is_dynamic=False) + + def forward(self, x): + return x + + @torch.jit.export + def calculate_qparams(self): + raise Exception("calculate_qparams should not be called for ReuseInputObserver") # noqa: TRY002 + +def _is_observer_script_module(mod, obs_type_name): + """Returns true if given mod is an instance of Observer script module.""" + if isinstance(mod, torch.jit.RecursiveScriptModule): + # qualified name looks like '__torch__.torch.ao.quantization.observer.___torch_mangle_2.MinMaxObserver' + suffix = mod._c.qualified_name.split(".", 1)[1] + name = re.sub(r"\.___torch_mangle_\d+", "", suffix) + return obs_type_name in name + return False + + +def _is_activation_post_process(module): + return ( + isinstance(module, (torch.ao.quantization.ObserverBase, + torch.ao.quantization.FakeQuantizeBase)) or _is_observer_script_module(module, "quantization.observer") + ) + + +def _is_per_channel_script_obs_instance(module): + if isinstance(module, torch.jit.RecursiveScriptModule): + return _is_observer_script_module( + module, "quantization.observer.PerChannelMinMaxObserver" + ) or _is_observer_script_module( + module, "quantization.observer.MovingAveragePerChannelMinMaxObserver" + ) + return False + + +def get_observer_state_dict(mod): + r""" + Returns the state dict corresponding to the observer stats. + Traverse the model state_dict and extract out the stats. 
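+
+    A minimal usage sketch (``prepared_model`` is assumed to be a model that
+    already contains observers, e.g. the output of ``torch.ao.quantization.prepare``
+    after calibration)::
+
+        import torch
+
+        obs_dict = get_observer_state_dict(prepared_model)
+        torch.save(obs_dict, "observer_stats.pt")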
+ """ + od = OrderedDict() + if isinstance(mod, torch.jit.RecursiveScriptModule): + for k, v in mod.state_dict().items(): + if "observer" in k: + od[k] = v + else: + # path for GraphModule and nn.Module (eager mode) + for k, v in mod.state_dict().items(): + if "activation_post_process" in k: + od[k] = v + od._metadata = mod.state_dict()._metadata # type: ignore[attr-defined] + return od + + +def load_observer_state_dict(mod, obs_dict): + r""" + Given input model and a state_dict containing model observer stats, + load the stats back into the model. The observer state_dict can be saved + using torch.ao.quantization.get_observer_state_dict + """ + missing_keys: List[str] = [] + unexpected_keys: List[str] = [] + for name, module in mod.named_modules(): + prefix = name + "." + if _is_activation_post_process(module): + if _is_per_channel_script_obs_instance(module): + # For per-channel observers we need to call a custom load_from_state_dict to resize the tensor. + # However this is not called when the module is scripted and we end up calling the default one in module.py + module._load_from_state_dict_script( + obs_dict, prefix, {}, True, missing_keys, unexpected_keys, [] + ) + else: + module._load_from_state_dict( + obs_dict, prefix, {}, False, missing_keys, unexpected_keys, [] + ) + for k in missing_keys: + if "observer" in k or "activation_post_process" in k: + raise Exception(f"Missing keys for observer {k} in state_dict") # noqa: TRY002 + for k in unexpected_keys: + if "observer" in k or "activation_post_process" in k: + raise Exception(f"Unexpected keys for observer {k} in state_dict") # noqa: TRY002 + + +# Restrict activations to be in the range (0,127) +default_observer = MinMaxObserver.with_args(quant_min=0, quant_max=127) +""" +Default observer for static quantization, usually used for debugging. +""" + +default_placeholder_observer = PlaceholderObserver +""" +Default placeholder observer, usually used for quantization to torch.float16. +""" + +default_debug_observer = RecordingObserver +""" +Default debug-only observer. +""" + +default_weight_observer = MinMaxObserver.with_args( + dtype=torch.qint8, qscheme=torch.per_tensor_symmetric +) +""" +Default weight observer. +""" + +weight_observer_range_neg_127_to_127 = MinMaxObserver.with_args( + dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, + quant_min=-127, quant_max=127, eps=2 ** -12) +""" +Symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128. +""" + +default_histogram_observer = HistogramObserver.with_args(quant_min=0, quant_max=127) +""" +Default histogram observer, usually used for PTQ. +""" + +default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args( + dtype=torch.qint8, qscheme=torch.per_channel_symmetric +) +""" +Default per-channel weight observer, usually used on backends where per-channel +weight quantization is supported, such as `fbgemm`. +""" + +per_channel_weight_observer_range_neg_127_to_127 = PerChannelMinMaxObserver.with_args( + dtype=torch.qint8, qscheme=torch.per_channel_symmetric, + quant_min=-127, quant_max=127, eps=2 ** -12) +""" +Per-channel, symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128. +""" + +default_dynamic_quant_observer = PlaceholderObserver.with_args( + dtype=torch.quint8, quant_min=0, quant_max=255, is_dynamic=True, +) +""" +Default observer for dynamic quantization. 
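+
+A sketch of how this default is commonly combined into a dynamic-quantization
+``QConfig`` (shown for illustration; pick the weight observer appropriate for
+your backend)::
+
+    from torch.ao.quantization import QConfig
+
+    dynamic_qconfig = QConfig(
+        activation=default_dynamic_quant_observer,
+        weight=default_weight_observer,
+    )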
+""" + +default_float_qparams_observer = PerChannelMinMaxObserver.with_args( + dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0 +) +""" +Default observer for a floating point zero-point. +""" + +default_float_qparams_observer_4bit = PerChannelMinMaxObserver.with_args( + dtype=torch.quint4x2, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0 +) +""" +Default observer for a floating point zero-point and 4 bit activations. +""" + +# TODO(future PR): remove these defaults and enforce activation functions +# to explicitly specify their output range +default_fixed_qparams_range_neg1to1_observer = FixedQParamsObserver.with_args( + scale=2.0 / 256.0, zero_point=128, dtype=torch.quint8, quant_min=0, quant_max=255) +default_fixed_qparams_range_0to1_observer = FixedQParamsObserver.with_args( + scale=1.0 / 256.0, zero_point=0, dtype=torch.quint8, quant_min=0, quant_max=255) +# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases +default_symmetric_fixed_qparams_observer = default_fixed_qparams_range_neg1to1_observer +default_affine_fixed_qparams_observer = default_fixed_qparams_range_0to1_observer + +""" +Default observers for fixed qparams operations. +""" + +default_reuse_input_observer = ReuseInputObserver +""" +Default observer for operators like reshape that reuses the observer of input to +the operator +""" diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50003a5ca316f438238d404dc56714c85b253499 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ca257f97fa4036cf85099d2b3a1e1539bc65ec Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/duplicate_dq_pass.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/export_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/export_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77901a61ce3a73e85eeed765e25f536c4e1eea91 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/export_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..524f12270a6cc9f0c044e355b12d5c3e7d1eb0f4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/generate_numeric_debug_handle.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f05d99c5fe6f23ee60ea1f2caa9a6dda7589f6f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/graph_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..017fa1be21f86ea1011a2ea6d5fd0a9fdcfd30c4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/port_metadata_pass.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20ef72737d50eeb7bb2ba401da63cbfaae4df013 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/prepare.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67eff5a1e765de1bdd6c636b5a8417322026a6f6 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/__pycache__/utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6ae93ba1d2604a6a195e309dbc8d2fbedaadec9d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/graph_utils.py @@ -0,0 +1,110 @@ +# mypy: allow-untyped-defs +import itertools +from typing import Any, List, OrderedDict, Set, Optional, Callable +import operator +from torch.fx import Node + +import torch + +from torch.fx.passes.utils.source_matcher_utils import ( + check_subgraphs_connected, + get_source_partitions, + SourcePartition, +) + +__all__ = [ + "find_sequential_partitions", + "get_equivalent_types", + "update_equivalent_types_dict", +] + +_EQUIVALENT_TYPES: List[Set] = [ + {torch.nn.Conv1d, torch.nn.functional.conv1d}, + {torch.nn.Conv2d, torch.nn.functional.conv2d}, + {torch.nn.AdaptiveAvgPool2d, torch.nn.functional.adaptive_avg_pool2d}, + {torch.nn.ReLU, torch.nn.functional.relu, torch.nn.functional.relu_}, + {torch.nn.BatchNorm2d, torch.nn.functional.batch_norm}, + {torch.nn.Hardtanh, torch.nn.functional.hardtanh, torch.nn.functional.hardtanh_}, + {torch.add, operator.add, operator.iadd, "add", "add_"}, + {torch.mul, operator.mul, operator.imul, "mul", "mul_"}, +] + + +def _create_equivalent_types_dict(): + _DICT = {} + for values in _EQUIVALENT_TYPES: + for v in values: + _DICT[v] = list(values) + return _DICT + + +_EQUIVALENT_TYPES_DICT = _create_equivalent_types_dict() + +def get_equivalent_types() -> List[Set]: + return _EQUIVALENT_TYPES + +def update_equivalent_types_dict(customized_equivalent_types=None): + """Help function for user who wants to customize the _EQUIVALENT_TYPES and 
_EQUIVALENT_TYPES_DICT. + When customized_equivalent_types passes in, + re-generate _EQUIVALENT_TYPES and _EQUIVALENT_TYPES_DICT. + """ + if customized_equivalent_types is None: + raise ValueError("customized_equivalent_types should not be None") + global _EQUIVALENT_TYPES + global _EQUIVALENT_TYPES_DICT + _EQUIVALENT_TYPES = customized_equivalent_types + _EQUIVALENT_TYPES_DICT = _create_equivalent_types_dict() + +def _partitions_sequential(partitions: List[SourcePartition]): + prev_partition = None + for partition in partitions: + if prev_partition is not None and not check_subgraphs_connected( + prev_partition, partition + ): + return False + prev_partition = partition + return True + + +def _get_matching_types(partition_type): + matching_types = [partition_type] + if partition_type in _EQUIVALENT_TYPES_DICT: + matching_types.extend(_EQUIVALENT_TYPES_DICT[partition_type]) + return matching_types + + +def _valid_type_sequence(partition_types: List[Any]): + partition_types_set = set() # type: ignore[var-annotated] + for partition_type in partition_types: + matching_types = _get_matching_types(partition_type) + matching_types_set = set(matching_types) + if len(partition_types_set & matching_types_set) > 0: + return False + partition_types_set |= matching_types_set + return True + + +def find_sequential_partitions( + gm: torch.fx.GraphModule, + partition_types: List[Any], + include_functional_equivalent=True, + filter_fn: Optional[Callable[[Node], bool]] = None, +): + if not _valid_type_sequence(partition_types): + raise ValueError( + f"Invalid partition types: {partition_types}. Each type in the sequence must be unique" + ) + + typed_partitions: OrderedDict[Any, List[SourcePartition]] = OrderedDict() + for partition_type in partition_types: + types_to_match = _get_matching_types(partition_type) + partitions = get_source_partitions(gm.graph, types_to_match, filter_fn) + typed_partitions[partition_type] = list(itertools.chain.from_iterable(partitions.values())) + + typed_partitions_list = list(typed_partitions.values()) + fusion_candidates = itertools.product(*typed_partitions_list) + fused_partitions = [] + for candidate in fusion_candidates: + if _partitions_sequential(candidate): # type: ignore[arg-type] + fused_partitions.append(candidate) + return fused_partitions diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ddac64c04fa4bbc6a781540cbce9c6416ba0b52 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/__init__.py @@ -0,0 +1,5 @@ +from .rewrite import reference_representation_rewrite + +__all__ = [ + "reference_representation_rewrite", +] diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py new file mode 100644 index 0000000000000000000000000000000000000000..40801344740b1dfa520761b8eecb5d996685db7e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/pt2e/representation/rewrite.py @@ -0,0 +1,601 @@ +# mypy: allow-untyped-defs +import torch +from torch.fx import GraphModule +from ..export_utils import _WrapperModule +from ..utils import ( + _get_aten_graph_module_for_pattern, + remove_tensor_overload_for_qdq_ops, + _replace_literals_with_new_placeholders, + 
_replace_literals_with_existing_placeholders, +) +from torch.ao.quantization.fx._decomposed import quantized_decomposed_lib # noqa: F401 +from torch.fx.subgraph_rewriter import replace_pattern +from torch._higher_order_ops.out_dtype import out_dtype +from typing import Optional, Callable, Tuple, Any +from dataclasses import dataclass + +from functools import partial + +__all__ = [ + "reference_representation_rewrite", +] + + +_QUANTIZED_LINEAR_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (2, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randint(-128, 127, (5, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_linear( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.linear.default(x_fp32, weight_fp32, bias_fp32) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_linear( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. + # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, x_quant_min, x_quant_max) + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.linear.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None) + # TODO: change to mul.Scalar + # Note: we are quantizing bias with these scales without signal from user, but it might be OK + bias_scale = x_scale * weight_scale + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + acc_i32 = acc_i32 + bias_i32 + # TODO: change to mul.Scalar when we make x_scale/weight_scale etc. 
Scalar values + acc_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, acc_i32, x_scale * weight_scale / out_scale) + out_zero_point + out_i8 = torch.ops.aten.clamp(acc_i32, out_quant_min, out_quant_max).to(torch.int8) + return out_i8 + + +_DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS = ( + torch.randn((2, 5), dtype=torch.float), + -128, + 127, + torch.finfo(torch.float32).eps, + torch.randint(-128, 127, (5, 5), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), +) + + +def _qdq_dynamic_quantized_linear( + x_fp32, x_quant_min, x_quant_max, x_eps, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, +): + x_scale, x_zero_point = torch.ops.quantized_decomposed.choose_qparams(x_fp32, x_quant_min, x_quant_max, x_eps, torch.int8) + x_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + x_fp32, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.linear.default(x_fp32, weight_fp32, bias_fp32) + return out_fp32 + +def _reference_dynamic_quantized_linear( + x_fp32, x_quant_min, x_quant_max, x_eps, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, +): + x_scale, x_zero_point = torch.ops.quantized_decomposed.choose_qparams(x_fp32, x_quant_min, x_quant_max, x_eps, torch.int8) + # decomposed representation for quantize_per_tensor + # TODO: use out_dtype(mul, ...) 
here when the op is ready + x_fp32 = x_fp32 / x_scale # fp32 + # round modes might be different here + # pytorch is rounding to even, which is also common for most of the backends + x_fp32 = torch.round(x_fp32) # fp32 + x_i32 = x_fp32.to(dtype=torch.int32) # int32 + x_i32 = x_i32 + x_zero_point # int32 + # clamp works for fp32, int32 and int8 dtypes + x_i32 = torch.clamp(x_i32, x_quant_min, x_quant_max) # int32 + x_i8 = x_i32.to(dtype=torch.int8) + + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.linear.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None) + bias_scale = x_scale * weight_scale + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + acc_i32 = acc_i32 + bias_i32 + out_fp32 = acc_i32 * (x_scale * weight_scale) + return out_fp32 + + +_QUANTIZED_CONV2d_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-127], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_conv2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + stride = [1, 1] + padding = [0, 0] + dilation = [1, 1] + transposed = False + output_padding = [0, 0] + groups = 1 + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + weight_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor( + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, torch.int8) + out_fp32 = torch.ops.aten.convolution.default( + x_fp32, weight_fp32, bias_fp32, stride, padding, dilation, transposed, output_padding, groups) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_conv2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, + weight_i8, weight_scale, weight_zero_point, weight_quant_min, weight_quant_max, + bias_fp32, + out_scale, out_zero_point, out_quant_min, out_quant_max +): + stride = [1, 1] + padding = [0, 0] + dilation = [1, 1] + transposed = False + output_padding = [0, 0] + groups = 1 + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. 
+ + # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, x_quant_min, x_quant_max) + weight_i8 = torch.ops.aten.clamp(weight_i8, weight_quant_min, weight_quant_max) + + x_i16 = x_i8.to(torch.int16) + weight_i16 = weight_i8.to(torch.int16) + # always set bias to None so that the same representation can work for the case + # no matter if bias_scale == x_scale * weight_scale or not + acc_i32 = out_dtype( + torch.ops.aten.convolution.default, + torch.int32, + x_i16 - x_zero_point, + weight_i16 - weight_zero_point, + None, stride, padding, dilation, transposed, output_padding, groups) + # Note: we are quantizing bias with these scales without signal from user, but it might be OK + bias_scale = x_scale * weight_scale + # bias quantization to int32 uses bias_scale = x_scale * weight_scale due to: + # Take linear calculation for example + # Out_(i, j)_fp32 = Sum_(over k)[X_(i, k)_fp32 * W_(i, k)_fp32] + bias_(i)_fp32 + # Represent X, W fp32 as their dequant transforms + # A_fp32 = (A_q - A_zero_point)/A_scale + # Out_(i, j)_fp32 = Sum_(over k)[(X_(i, k)_fp32 - X_zp) * X_scale * (W_(i, k)_fp32 - W_zp) * W_scale] + bias_(i)_fp32 + # Factor out X_scale and W_scale + # Out_(i, j)_fp32 = ((X_scale * W_scale) * Sum_(over k)[(X_(i, k)_fp32 - X_zp) * (W_(i, k)_fp32 - W_zp)]) + bias_(i)_fp32 + # In order to fold the addition of bias_(i)_fp32 inside the sum, we must do + # Out_(i, j)_fp32 = (X_scale * W_scale) * (Sum_(over k)[(X_(i, k)_fp32 - X_zp) * (W_(i, k)_fp32 - W_zp)] + (1 / (X_scale * W_scale)) * bias_(i)_fp32) # noqa: B950 + # Note we had to multiply bias_fp32 with X_scale * W_scale = bias_scale + # Thus bias quantization to int32 must be with X_scale * W_scale + + bias_i32 = out_dtype(torch.ops.aten.div.Tensor, torch.int32, bias_fp32, bias_scale) + # Unsqueeze to match broadcast dims + # Unfortunately I cannot do bias_i32.unsqueeze(0) due to literal matching nightmare + # in graph pattern replacement + bias_i32 = bias_i32.unsqueeze(-1) + bias_i32 = bias_i32.unsqueeze(-1) + acc_i32 = acc_i32 + bias_i32 + # TODO: change to mul.Scalar when we make x_scale/weight_scale etc.
Scalar values + acc_i32 = out_dtype( + torch.ops.aten.mul.Tensor, torch.int32, acc_i32, x_scale * weight_scale / out_scale) + out_zero_point + out_i8 = torch.ops.aten.clamp(acc_i32, out_quant_min, out_quant_max).to(torch.int8) + return out_i8 + + +_QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_add_relu( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, quant_min, quant_max, torch.int8) + y_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(y_i8, y_scale, y_zero_point, quant_min, quant_max, torch.int8) + out_fp32 = x_fp32 + y_fp32 + out_fp32 = torch.ops.aten.relu(out_fp32) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantized_add_relu( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + """ + See comments for `_reference_quantized_add` for more information on + how to derive the formula for out_i8 based on x_i8 and y_i8 + """ + x_i32 = x_i8.to(torch.int32) + y_i32 = y_i8.to(torch.int32) + # TODO: change this to mul.Scalar? + x_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, (x_i32 - x_zero_point), (x_scale / out_scale)) + y_i32 = out_dtype(torch.ops.aten.mul.Tensor, torch.int32, (y_i32 - y_zero_point), (y_scale / out_scale)) + out_i32 = x_i32 + y_i32 + out_zero_point + # out_i32 = torch.ops.aten.clamp(out_i32, out_zero_point) + out_i8 = torch.ops.aten.clamp(out_i32, out_zero_point, quant_max).to(torch.int8) + return out_i8 + +def _qdq_quantized_add(x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, out_scale, out_zero_point, quant_min, quant_max): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, quant_min, quant_max, torch.int8) + y_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(y_i8, y_scale, y_zero_point, quant_min, quant_max, torch.int8) + out_fp32 = x_fp32 + y_fp32 + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantized_add( + x_i8, x_scale, x_zero_point, y_i8, y_scale, y_zero_point, + out_scale, out_zero_point, quant_min, quant_max +): + """ + # How to Derive the formula for out_i8 based on x_i8 and y_i8 + # (since quantized add takes x_i8, y_i8 and their quantization parameters, and produce an out_i8) + + # out_i8 is quantized output, we can write down the formula for it first: +out_i8 = out_f32 / out_scale + out_zero_point (1) + + # then out_fp32 is computed from x_f32 + y_f32, and the x_fp32 and y_fp32 are the dequantized x_i8 and y_i8 + out_f32 = x_f32 + y_f32 (2) + x_fp32 = (x_i8 - x_zero_point) * x_scale (3) + y_fp32 = (y_i8 - y_zero_point) * y_scale (4) + + # applying the above fomula to the out_i8 equation we can get the following: + out_i8 = out_fp32 / out_scale + out_zero_point # (1) + = (x_f32 + 
y_f32) / out_scale + out_zero_point # applying (2) to substitute out_fp32 with x_fp32 + y_fp32 + = ((x_i8 - x_zero_point) * x_scale + (y_i8 - y_zero_point) * y_scale) / out_scale + out_zero_point # apply (3) and (4) + """ + x_i32 = x_i8.to(torch.int32) + y_i32 = y_i8.to(torch.int32) + # TODO: use out_dtype op + x_i32 = torch.round((x_scale / out_scale) * (x_i32 - x_zero_point)).to(torch.int32) + y_i32 = torch.round((y_scale / out_scale) * (y_i32 - y_zero_point)).to(torch.int32) + out_i32 = x_i32 + y_i32 + out_zero_point + quant_min = -128 + quant_max = 127 + out_i8 = torch.ops.aten.clamp(out_i32, quant_min, quant_max).to(torch.int8) + return out_i8 + +_QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _qdq_quantized_max_pool2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, out_scale, out_zero_point, out_quant_min, out_quant_max): + kernel_size = 1 + stride = 1 + padding = 0 + dilation = 1 + ceil_mode = False + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, torch.int8) + out_fp32, _ = torch.ops.aten.max_pool2d_with_indices.default(x_fp32, kernel_size, stride, padding, dilation, ceil_mode) + out_i8 = torch.ops.quantized_decomposed.quantize_per_tensor( + out_fp32, out_scale, out_zero_point, out_quant_min, out_quant_max, torch.int8) + return out_i8 + +def _reference_quantized_max_pool2d( + x_i8, x_scale, x_zero_point, x_quant_min, x_quant_max, out_scale, out_zero_point, out_quant_min, out_quant_max): + kernel_size = 1 + stride = 1 + padding = 0 + dilation = 1 + ceil_mode = False + # to preserve x_quant_min, x_quant_max in the graph for pattern matching + x_i8 = torch.clamp(x_i8, x_quant_min, x_quant_max) + x_i32 = x_i8.to(torch.int32) + out_i32, _ = torch.ops.aten.max_pool2d_with_indices.default( + x_i32 - x_zero_point, + kernel_size, + stride, + padding, + dilation, + ceil_mode + ) + out_fp32 = out_i32 * (x_scale / out_scale) + out_zero_point + out_fp32 = torch.clamp(out_fp32, out_quant_min, out_quant_max) + out_i8 = out_fp32.to(torch.int8) + return out_i8 + +_QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = ( + torch.randn(1, 3, 3, 3, dtype=torch.float), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _quantize_per_tensor_int8(x_fp32, scale, zero_point, quant_min, quant_max): + x = torch.ops.quantized_decomposed.quantize_per_tensor(x_fp32, scale, zero_point, quant_min, quant_max, torch.int8) + return x + +def _reference_quantize_per_tensor_int8(x_fp32, scale, zero_point, quant_min, quant_max): + # TODO: use out_dtype(mul, ...) 
here when the op is ready + x = x_fp32 / scale # fp32 + # round modes might be different here + # pytorch is rounding to even, which is also common for most of the backends + x = torch.round(x) # fp32 + x = x.to(dtype=torch.int32) # int32 + x = x + zero_point # int32 + # clamp works for fp32, int32 and int8 dtypes + x = torch.clamp(x, quant_min, quant_max) # int32 + x = x.to(dtype=torch.int8) + return x + +_DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(1, dtype=torch.float), + torch.zeros(1, dtype=torch.int), + torch.tensor([-128], dtype=torch.int), + torch.tensor([127], dtype=torch.int), +) + +def _dequantize_per_tensor_int8(x_i8, scale, zero_point, quant_min, quant_max): + x_fp32 = torch.ops.quantized_decomposed.dequantize_per_tensor(x_i8, scale, zero_point, quant_min, quant_max, torch.int8) + return x_fp32 + +def _reference_dequantize_per_tensor_int8(x_i8, scale, zero_point, quant_min, quant_max): + # without using quant_min/max in clamp, the traced graph will not have quant_mi/max args. + # This results in failure to match the pattern. + # Therefore, we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, quant_min, quant_max) + # TODO: use out_dtype op + # note: x_i8.to(torch.int32) does not work here + # TODO: debug the implementation later when torchdynamo time out issue is resolved + return ((x_i8.to(torch.float32) - zero_point) * scale).to(dtype=torch.float32) + +_QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = ( + torch.randn(1, 3, 3, 3, dtype=torch.float), + torch.randn(3, dtype=torch.float), + torch.zeros(3, dtype=torch.int), + 1, + -128, + 127, +) + +def _quantize_per_channel_int8(x_fp32, scales, zero_points, ch_axis, quant_min, quant_max): + out_i8 = torch.ops.quantized_decomposed.quantize_per_channel( + x_fp32, scales, zero_points, ch_axis, quant_min, quant_max, torch.int8 + ) + return out_i8 + +def _reference_quantize_per_channel_int8(x_fp32, scales, zero_points, ch_axis, quant_min, quant_max): + x_fp32 = torch.transpose(x_fp32, ch_axis, -1) + out_i32 = torch.ops.aten.clamp(torch.round(x_fp32 / scales).to(torch.int32) + zero_points, quant_min, quant_max) + out_i32 = torch.transpose(out_i32, ch_axis, -1) + return out_i32.to(torch.int8) + +_DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = ( + torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8), + torch.randn(3, dtype=torch.float), + torch.zeros(3, dtype=torch.int), + 1, + -128, + 127, +) + +def _dequantize_per_channel_int8(x_i8, scales, zero_points, ch_axis, quant_min, quant_max): + # the following will be replaced as placeholders + out_fp32 = torch.ops.quantized_decomposed.dequantize_per_channel( + x_i8, scales, zero_points, ch_axis, quant_min, quant_max, torch.int8 + ) + return out_fp32 + +def _reference_dequantize_per_channel_int8(x_i8, scales, zero_points, ch_axis, quant_min, quant_max): + # the following will be replaced as placeholders + # in order to preserve the quant_min/quant_max args for pattern matching (e.g. 
matching for int4 quantized ops) + # we call a torch.ops.aten.clamp here + x_i8 = torch.ops.aten.clamp(x_i8, quant_min, quant_max) + x_i8 = torch.transpose(x_i8, ch_axis, -1) + x_i32 = x_i8.to(torch.int32) + out_fp32 = (x_i32 - zero_points).to(torch.float) * scales + out_fp32 = torch.transpose(out_fp32, ch_axis, -1) + return out_fp32 + +def _replace_ph_qdq_per_channel_replacement(gm: torch.fx.GraphModule): + return _replace_literals_with_existing_placeholders( + gm, + exclude_literals=[-1], + literal_to_ph_idx={1: 3, -128: 4, 127: 5} + ) + + +@dataclass +class _RewriteInfo: + """Data needed for rewrite, this includes example inputs, pattern and replacement functions + and post transformation functions for the exported pattern and replacement GraphModule + """ + + # example inputs used for exporting the pattern into GraphModule + example_inputs: Tuple[Any, ...] + pattern: Callable + replacement: Callable + # post transformation on the exported pattern and replacement GraphModule + pattern_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None + replacement_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None + +_REWRITE_INFO_LIST = [ + _RewriteInfo( + _DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS, + _WrapperModule(_qdq_dynamic_quantized_linear), + _WrapperModule(_reference_dynamic_quantized_linear), + partial( + _replace_literals_with_existing_placeholders, + literal_to_ph_idx={ + -128: 1, + 127: 2, + torch.finfo(torch.float32).eps: 3 + } + ), + partial( + _replace_literals_with_existing_placeholders, + literal_to_ph_idx={ + -128: 1, + 127: 2, + torch.finfo(torch.float32).eps: 3 + } + ), + ), + _RewriteInfo( + _QUANTIZED_LINEAR_EXAMPLE_INPUTS, + _WrapperModule(_qdq_quantized_linear), + _WrapperModule(_reference_quantized_linear), + _replace_literals_with_new_placeholders, + _replace_literals_with_new_placeholders, + ), + _RewriteInfo( + _QUANTIZED_CONV2d_EXAMPLE_INPUTS, + _WrapperModule(_qdq_quantized_conv2d), + _WrapperModule(_reference_quantized_conv2d), + partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]), + partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]), + ), + _RewriteInfo( + _QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS, + _WrapperModule(_qdq_quantized_add_relu), + _WrapperModule(_reference_quantized_add_relu), + ), + _RewriteInfo( + _QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS, + _WrapperModule(_qdq_quantized_add), + _WrapperModule(_reference_quantized_add), + ), + _RewriteInfo( + _QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS, + _WrapperModule(_qdq_quantized_max_pool2d), + _WrapperModule(_reference_quantized_max_pool2d), + _replace_literals_with_new_placeholders, + _replace_literals_with_new_placeholders + ), + _RewriteInfo( + _QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS, + _WrapperModule(_quantize_per_tensor_int8), + _WrapperModule(_reference_quantize_per_tensor_int8), + ), + _RewriteInfo( + _DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS, + _WrapperModule(_dequantize_per_tensor_int8), + _WrapperModule(_reference_dequantize_per_tensor_int8), + ), + _RewriteInfo( + _QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS, + _WrapperModule(_quantize_per_channel_int8), + _WrapperModule(_reference_quantize_per_channel_int8), + _replace_ph_qdq_per_channel_replacement, + _replace_ph_qdq_per_channel_replacement + ), + _RewriteInfo( + _DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS, + _WrapperModule(_dequantize_per_channel_int8), + _WrapperModule(_reference_dequantize_per_channel_int8), + _replace_ph_qdq_per_channel_replacement, + _replace_ph_qdq_per_channel_replacement 
+ ), +] + +def reference_representation_rewrite(model: GraphModule) -> GraphModule: + remove_tensor_overload_for_qdq_ops(model) + for rewrite_info in _REWRITE_INFO_LIST: + example_inputs = rewrite_info.example_inputs + pattern = rewrite_info.pattern + replacement = rewrite_info.replacement + pattern_post_trans = rewrite_info.pattern_post_trans + replacement_post_trans = rewrite_info.replacement_post_trans + pattern = _get_aten_graph_module_for_pattern(pattern, example_inputs) # type: ignore[arg-type, assignment] + remove_tensor_overload_for_qdq_ops(pattern) # type: ignore[arg-type] + replacement = _get_aten_graph_module_for_pattern(replacement, example_inputs) # type: ignore[arg-type, assignment] + remove_tensor_overload_for_qdq_ops(replacement) # type: ignore[arg-type] + if pattern_post_trans: + pattern = pattern_post_trans(pattern) + if replacement_post_trans: + replacement = replacement_post_trans(replacement) + pattern.recompile() # type: ignore[attr-defined] + replacement.recompile() # type: ignore[attr-defined] + matches = replace_pattern(model, pattern, replacement) + return model diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/qconfig_mapping.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/qconfig_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..37f71465afeae138bc4e561df844ecc7c3ba598c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/qconfig_mapping.py @@ -0,0 +1,351 @@ +# mypy: allow-untyped-defs +from __future__ import annotations +from collections import OrderedDict +from typing import Any, Callable, Dict, Tuple, Union, List + +import torch + +from .fake_quantize import ( + default_weight_fake_quant, + FixedQParamsFakeQuantize, +) +from .observer import ( + _PartialWrapper, + default_fixed_qparams_range_0to1_observer, + default_fixed_qparams_range_neg1to1_observer, + default_placeholder_observer, + default_weight_observer, +) +from .qconfig import ( + default_reuse_input_qconfig, + default_symmetric_qnnpack_qconfig, + default_symmetric_qnnpack_qat_qconfig, + get_default_qconfig, + get_default_qat_qconfig, + QConfig, + QConfigAny, + default_quint8_weight_qconfig +) + + +__all__ = [ + "get_default_qconfig_mapping", + "get_default_qat_qconfig_mapping", + "QConfigMapping", +] + + +# TODO: replace all usages with these constants +_GLOBAL_DICT_KEY = "" +_OBJECT_TYPE_DICT_KEY = "object_type" +_MODULE_NAME_REGEX_DICT_KEY = "module_name_regex" +_MODULE_NAME_DICT_KEY = "module_name" +_MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY = "module_name_object_type_order" + +# TODO: derive this map from the BackendConfig +_FIXED_QPARAMS_OP_TO_OBSERVER: Dict[Union[Callable, str], _PartialWrapper] = { + torch.nn.Hardsigmoid: default_fixed_qparams_range_0to1_observer, + torch.nn.functional.hardsigmoid: default_fixed_qparams_range_0to1_observer, + "hardsigmoid": default_fixed_qparams_range_0to1_observer, + "hardsigmoid_": default_fixed_qparams_range_0to1_observer, + torch.nn.Sigmoid: default_fixed_qparams_range_0to1_observer, + torch.sigmoid: default_fixed_qparams_range_0to1_observer, + "sigmoid": default_fixed_qparams_range_0to1_observer, + "sigmoid_": default_fixed_qparams_range_0to1_observer, + torch.nn.Softmax: default_fixed_qparams_range_0to1_observer, + torch.nn.Tanh: default_fixed_qparams_range_neg1to1_observer, + torch.tanh: default_fixed_qparams_range_neg1to1_observer, + "tanh": default_fixed_qparams_range_neg1to1_observer, + "tanh_": default_fixed_qparams_range_neg1to1_observer, +} + + +def 
_get_default_qconfig_mapping(is_qat: bool, backend: str, version: int) -> QConfigMapping: + """ + Return the default QConfigMapping for the given quantization type and backend. + """ + if is_qat: + qconfig = get_default_qat_qconfig(backend, version) + else: + qconfig = get_default_qconfig(backend, version) + default_weight = default_weight_fake_quant if is_qat else default_weight_observer + + # default_per_channel_weight_observer is not currently compatible with fbgemm backend + # so we have to modify the weight observer to default_weight_observer or another + # per tensor supported observer. + # see https://github.com/pytorch/pytorch/issues/47535 + if backend in ("fbgemm", "x86"): + qconfig_transpose = QConfig(activation=qconfig.activation, weight=default_weight) + else: + qconfig_transpose = qconfig + + # currently layernorm only supports float weights + # we have to add this because otherwise there will be a extra quantize-dequantize pair + qconfig_layernorm = QConfig(activation=qconfig.activation, weight=default_placeholder_observer) + + qconfig_mapping = QConfigMapping() \ + .set_global(qconfig) \ + .set_object_type("reshape", default_reuse_input_qconfig) \ + .set_object_type(torch.nn.ConvTranspose1d, qconfig_transpose) \ + .set_object_type(torch.nn.ConvTranspose2d, qconfig_transpose) \ + .set_object_type(torch.nn.ConvTranspose3d, qconfig_transpose) \ + .set_object_type(torch.nn.functional.conv_transpose1d, qconfig_transpose) \ + .set_object_type(torch.nn.functional.conv_transpose2d, qconfig_transpose) \ + .set_object_type(torch.nn.functional.conv_transpose3d, qconfig_transpose) \ + .set_object_type(torch.nn.functional.layer_norm, qconfig_layernorm) \ + .set_object_type(torch.nn.LayerNorm, qconfig_layernorm) \ + .set_object_type(torch.nn.PReLU, default_quint8_weight_qconfig) \ + + # Use special observers for ops with fixed qparams + fixed_qparams_observer_to_qconfig: Dict[Any, QConfigAny] = {} + for fixed_qparams_op, observer in _FIXED_QPARAMS_OP_TO_OBSERVER.items(): + if observer in fixed_qparams_observer_to_qconfig: + fixed_qparams_qconfig = fixed_qparams_observer_to_qconfig[observer] + else: + if is_qat: + activation = FixedQParamsFakeQuantize.with_args(observer=observer) + else: + activation = observer + fixed_qparams_qconfig = QConfig(activation=activation, weight=default_weight) + fixed_qparams_observer_to_qconfig[observer] = fixed_qparams_qconfig + qconfig_mapping.set_object_type(fixed_qparams_op, fixed_qparams_qconfig) + + # TODO Currently it's required that separate ops in a fused op/module have the same qconfig. + # Need to be able to support fusion of ops with different qconfigs + + return qconfig_mapping + +def get_default_qconfig_mapping(backend="x86", version=0) -> QConfigMapping: + """ + Return the default QConfigMapping for post training quantization. + + Args: + * ``backend`` (str) : the quantization backend for the default qconfig mapping, should be + one of ["x86" (default), "fbgemm", "qnnpack", "onednn"] + * ``version`` (int) : the version for the default qconfig mapping + """ + # TODO: add assert for backend choices + return _get_default_qconfig_mapping(False, backend, version) + +def get_default_qat_qconfig_mapping(backend="x86", version=1) -> QConfigMapping: + """ + Return the default QConfigMapping for quantization aware training. 
+ + Args: + * ``backend`` (str) : the quantization backend for the default qconfig mapping, should be + one of ["x86" (default), "fbgemm", "qnnpack", "onednn"] + * ``version`` (int) : the version for the default qconfig mapping + """ + return _get_default_qconfig_mapping(True, backend, version) + +def _get_symmetric_qnnpack_qconfig_mapping() -> QConfigMapping: + """ + Return a QConfigMapping that uses `torch.ao.quantization.default_symmetric_qnnpack_qconfig` + as the default QConfig. + """ + default_qconfig = default_symmetric_qnnpack_qconfig + return _get_default_qconfig_mapping_with_default_qconfig(False, "qnnpack", default_qconfig) + +def _get_symmetric_qnnpack_qat_qconfig_mapping() -> QConfigMapping: + """ + Return a QConfigMapping that uses `torch.ao.quantization.default_symmetric_qnnpack_qat_qconfig` + as the default QConfig. + """ + default_qconfig = default_symmetric_qnnpack_qat_qconfig + return _get_default_qconfig_mapping_with_default_qconfig(True, "qnnpack", default_qconfig) + +def _get_default_qconfig_mapping_with_default_qconfig( + is_qat: bool, + backend: str, + default_qconfig: QConfig, +) -> QConfigMapping: + """ + Return a QConfigMapping that uses the provided qconfig as the default QConfig. + """ + if is_qat: + qconfig_mapping = get_default_qat_qconfig_mapping(backend) + else: + qconfig_mapping = get_default_qconfig_mapping(backend) + qconfig_mapping.set_global(default_qconfig) + for pattern in qconfig_mapping.object_type_qconfigs.keys(): + if pattern not in _FIXED_QPARAMS_OP_TO_OBSERVER: + qconfig_mapping.set_object_type(pattern, default_qconfig) + return qconfig_mapping + +_QCONFIG_STYLE_ORDER: List[str] = [ + "global_qconfig", + "object_type_qconfigs", + "module_name_regex_qconfigs", + "module_name_qconfigs", + "module_name_object_type_order_qconfigs", +] + +class QConfigMapping: + """ + Mapping from model ops to :class:`torch.ao.quantization.QConfig` s. + + The user can specify QConfigs using the following methods (in increasing match priority): + + ``set_global`` : sets the global (default) QConfig + + ``set_object_type`` : sets the QConfig for a given module type, function, or method name + + ``set_module_name_regex`` : sets the QConfig for modules matching the given regex string + + ``set_module_name`` : sets the QConfig for modules matching the given module name + + ``set_module_name_object_type_order`` : sets the QConfig for modules matching a combination + of the given module name, object type, and the index at which the module appears + + Example usage:: + + qconfig_mapping = QConfigMapping() + .set_global(global_qconfig) + .set_object_type(torch.nn.Linear, qconfig1) + .set_object_type(torch.nn.ReLU, qconfig1) + .set_module_name_regex("foo.*bar.*conv[0-9]+", qconfig1) + .set_module_name_regex("foo.*", qconfig2) + .set_module_name("module1", qconfig1) + .set_module_name("module2", qconfig2) + .set_module_name_object_type_order("foo.bar", torch.nn.functional.linear, 0, qconfig3) + + """ + + def __init__(self): + # In increasing match priority: + self.global_qconfig: QConfigAny = None + self.object_type_qconfigs: OrderedDict[Union[Callable, str], QConfigAny] = OrderedDict() + self.module_name_regex_qconfigs: OrderedDict[str, QConfigAny] = OrderedDict() + self.module_name_qconfigs: OrderedDict[str, QConfigAny] = OrderedDict() + self.module_name_object_type_order_qconfigs: OrderedDict[Tuple[str, Callable, int], QConfigAny] =\ + OrderedDict() + + def set_global(self, global_qconfig: QConfigAny) -> QConfigMapping: + """ + Set the global (default) QConfig. 
+ """ + self.global_qconfig = global_qconfig + return self + + def set_object_type(self, object_type: Union[Callable, str], qconfig: QConfigAny) -> QConfigMapping: + """ + Set the QConfig for a given module type, function, or method name. + If the QConfig for an existing object type was already set, the new QConfig will override the old one. + """ + self.object_type_qconfigs[object_type] = qconfig + return self + + def set_module_name_regex(self, module_name_regex: str, qconfig: QConfigAny) -> QConfigMapping: + """ + Set the QConfig for modules matching the given regex string. + + Regexes will be matched in the order in which they are registered through this method. + Thus, the caller should register more specific patterns first, e.g.:: + + qconfig_mapping = QConfigMapping() + .set_module_name_regex("foo.*bar.*conv[0-9]+", qconfig1) + .set_module_name_regex("foo.*bar.*", qconfig2) + .set_module_name_regex("foo.*", qconfig3) + + In this example, "foo.bar.conv0" would match qconfig1, "foo.bar.linear" would match qconfig2, + and "foo.baz.relu" would match qconfig3. + + If the QConfig for an existing module name regex was already set, the new QConfig will override the + old one while preserving the order in which the regexes were originally registered. + """ + self.module_name_regex_qconfigs[module_name_regex] = qconfig + return self + + def set_module_name(self, module_name: str, qconfig: QConfigAny) -> QConfigMapping: + """ + Set the QConfig for modules matching the given module name. + If the QConfig for an existing module name was already set, the new QConfig will override the old one. + """ + self.module_name_qconfigs[module_name] = qconfig + return self + + def set_module_name_object_type_order( + self, + module_name: str, + object_type: Callable, + index: int, + qconfig: QConfigAny) -> QConfigMapping: + """ + Set the QConfig for modules matching a combination of the given module name, object type, + and the index at which the module appears. + + If the QConfig for an existing (module name, object type, index) was already set, the new QConfig + will override the old one. + """ + self.module_name_object_type_order_qconfigs[(module_name, object_type, index)] = qconfig + return self + + def __repr__(self) -> str: + output = self.__class__.__name__ + " (" + for style_name in _QCONFIG_STYLE_ORDER: + output += f"\n {style_name}" + qconfigs = getattr(self, style_name) + if isinstance(qconfigs, OrderedDict) and len(qconfigs) > 0: + for key, qconfig in qconfigs.items(): + output += f"\n {key}: {qconfig}" + else: + output += f"\n {qconfigs}" + return output + "\n)" + + # TODO: remove this + def to_dict(self) -> Dict[str, Any]: + """ + Convert this ``QConfigMapping`` to a dictionary with the following keys: + + "" (for global QConfig) + + "object_type" + + "module_name_regex" + + "module_name" + + "module_name_object_type_order" + + The values of this dictionary are lists of tuples. 
+ """ + return { + _GLOBAL_DICT_KEY: self.global_qconfig, + _OBJECT_TYPE_DICT_KEY: list(self.object_type_qconfigs.items()), + _MODULE_NAME_REGEX_DICT_KEY: list(self.module_name_regex_qconfigs.items()), + _MODULE_NAME_DICT_KEY: list(self.module_name_qconfigs.items()), + _MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY: [ + (*k, v) for k, v in self.module_name_object_type_order_qconfigs.items() + ], + } + + # TODO: remove this + @classmethod + def from_dict(cls, qconfig_dict: Dict[str, Any]) -> QConfigMapping: + """ + Create a ``QConfigMapping`` from a dictionary with the following keys (all optional): + + "" (for global QConfig) + + "object_type" + + "module_name_regex" + + "module_name" + + "module_name_object_type_order" + + The values of this dictionary are expected to be lists of tuples. + """ + conf = cls() + if _GLOBAL_DICT_KEY in qconfig_dict: + conf.set_global(qconfig_dict[_GLOBAL_DICT_KEY]) + for object_type, qconfig in qconfig_dict.get(_OBJECT_TYPE_DICT_KEY, []): + conf.set_object_type(object_type, qconfig) + for module_name_regex, qconfig in qconfig_dict.get(_MODULE_NAME_REGEX_DICT_KEY, []): + conf.set_module_name_regex(module_name_regex, qconfig) + for module_name, qconfig in qconfig_dict.get(_MODULE_NAME_DICT_KEY, []): + conf.set_module_name(module_name, qconfig) + for module_name, object_type, index, qconfig in qconfig_dict.get(_MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY, []): + conf.set_module_name_object_type_order(module_name, object_type, index, qconfig) + return conf diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/quant_type.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quant_type.py new file mode 100644 index 0000000000000000000000000000000000000000..1448a6270e2ba9ba49b13a9d4878180f56a5bba7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quant_type.py @@ -0,0 +1,30 @@ +import enum + +__all__ = [ + "QuantType", +] + +# Quantization type (dynamic quantization, static quantization). 
+# Should match the c++ enum in quantization_type.h +class QuantType(enum.IntEnum): + DYNAMIC = 0 + STATIC = 1 + QAT = 2 + WEIGHT_ONLY = 3 + +_quant_type_to_str = { + QuantType.STATIC: "static", + QuantType.DYNAMIC: "dynamic", + QuantType.QAT: "qat", + QuantType.WEIGHT_ONLY: "weight_only", +} + +# TODO: make this private +def _get_quant_type_to_str(quant_type: QuantType) -> str: + return _quant_type_to_str[quant_type] + +def _quant_type_from_str(name: str) -> QuantType: + for quant_type, s in _quant_type_to_str.items(): + if name == s: + return quant_type + raise ValueError(f"Unknown QuantType name '{name}'") diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantization_mappings.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantization_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..179cddca27427bae08139b5b777207cdd31650e8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantization_mappings.py @@ -0,0 +1,348 @@ +import copy + +import torch +from torch import nn + +import torch.nn.functional as F +import torch.ao.nn.intrinsic as nni +import torch.ao.nn.intrinsic.quantized as nniq +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.intrinsic.qat as nniqat +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.reference as nnqr +import torch.ao.nn.quantized.dynamic as nnqd +import torch.ao.nn.qat as nnqat +import torch.ao.nn.qat.dynamic as nnqatd + +from typing import Optional, Union, Dict, Set, Callable, Any + +# Because `torch.ao.nn` uses lazy imports, we need to make +# sure we import the contents explicitly here. +import torch.ao.nn.sparse +import torch.ao.nn as ao_nn +from torch.ao.quantization.stubs import QuantStub, DeQuantStub +from torch.ao.quantization.fake_quantize import ( + default_fixed_qparams_range_0to1_fake_quant, + default_fixed_qparams_range_neg1to1_fake_quant, +) +from torch.ao.quantization.utils import get_combined_dict +from torch.nn.utils.parametrize import type_before_parametrizations + +__all__ = [ + "DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS", + "DEFAULT_STATIC_QUANT_MODULE_MAPPINGS", + "DEFAULT_QAT_MODULE_MAPPINGS", + "DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS", + "DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS", + "DEFAULT_MODULE_TO_ACT_POST_PROCESS", + "DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS", + "DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS", + "no_observer_set", + "get_default_static_quant_module_mappings", + "get_default_static_quant_reference_module_mappings", + "get_embedding_static_quant_module_mappings", + "get_default_static_sparse_quant_module_mappings", + "get_static_quant_module_class", + "get_dynamic_quant_module_class", + "get_default_qat_module_mappings", + "get_embedding_qat_module_mappings", + "get_default_dynamic_quant_module_mappings", + "get_default_dynamic_sparse_quant_module_mappings", + "get_default_qconfig_propagation_list", + "get_default_compare_output_module_list", + "get_default_float_to_quantized_operator_mappings", + "get_quantized_operator", +] + +# Default map for swapping float module to reference quantized modules +DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = { + QuantStub: nnq.Quantize, + DeQuantStub: nnq.DeQuantize, + nn.Linear: nnqr.Linear, + nn.Conv1d: nnqr.Conv1d, + nn.Conv2d: nnqr.Conv2d, + nn.Conv3d: nnqr.Conv3d, + nn.ConvTranspose1d: nnqr.ConvTranspose1d, + nn.ConvTranspose2d: nnqr.ConvTranspose2d, + nn.ConvTranspose3d: nnqr.ConvTranspose3d, + nn.Embedding: 
nnqr.Embedding, + nn.EmbeddingBag: nnqr.EmbeddingBag, + nn.GRUCell: nnqr.GRUCell, + nn.LSTMCell: nnqr.LSTMCell, + nn.RNNCell: nnqr.RNNCell, + nn.LSTM: nnqr.LSTM, +} + +# Default map for swapping float module to quantized ones +DEFAULT_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = { + QuantStub: nnq.Quantize, + DeQuantStub: nnq.DeQuantize, + nn.BatchNorm2d: nnq.BatchNorm2d, + nn.BatchNorm3d: nnq.BatchNorm3d, + nn.Dropout: nnq.Dropout, + nn.Conv1d: nnq.Conv1d, + nn.Conv2d: nnq.Conv2d, + nn.Conv3d: nnq.Conv3d, + nn.ConvTranspose1d: nnq.ConvTranspose1d, + nn.ConvTranspose2d: nnq.ConvTranspose2d, + nn.ConvTranspose3d: nnq.ConvTranspose3d, + nn.ELU: nnq.ELU, + nn.Embedding: nnq.Embedding, + nn.EmbeddingBag: nnq.EmbeddingBag, + nn.GroupNorm: nnq.GroupNorm, + nn.Hardswish: nnq.Hardswish, + nn.InstanceNorm1d: nnq.InstanceNorm1d, + nn.InstanceNorm2d: nnq.InstanceNorm2d, + nn.InstanceNorm3d: nnq.InstanceNorm3d, + nn.LayerNorm: nnq.LayerNorm, + nn.LeakyReLU: nnq.LeakyReLU, + nn.modules.linear.NonDynamicallyQuantizableLinear: nnq.Linear, + nn.Linear: nnq.Linear, + nn.ReLU6: nnq.ReLU6, + nn.Dropout: nnq.Dropout, + nn.PReLU: nnq.PReLU, + # Wrapper Modules: + nnq.FloatFunctional: nnq.QFunctional, + # Intrinsic modules: + nni.BNReLU2d: nniq.BNReLU2d, + nni.BNReLU3d: nniq.BNReLU3d, + nni.ConvReLU1d: nniq.ConvReLU1d, + nni.ConvReLU2d: nniq.ConvReLU2d, + nni.ConvReLU3d: nniq.ConvReLU3d, + nni.ConvAdd2d: nniq.ConvAdd2d, + nni.ConvAddReLU2d: nniq.ConvAddReLU2d, + nni.LinearReLU: nniq.LinearReLU, + nni.LinearLeakyReLU: nniq.LinearLeakyReLU, + nni.LinearTanh: nniq.LinearTanh, + nniqat.ConvBn1d: nnq.Conv1d, + nniqat.ConvBn2d: nnq.Conv2d, + nniqat.ConvBn3d: nnq.Conv3d, + nniqat.ConvBnReLU1d: nniq.ConvReLU1d, + nniqat.ConvBnReLU2d: nniq.ConvReLU2d, + nniqat.ConvBnReLU3d: nniq.ConvReLU3d, + nniqat.ConvReLU2d: nniq.ConvReLU2d, + nniqat.ConvReLU3d: nniq.ConvReLU3d, + nniqat.LinearReLU: nniq.LinearReLU, + nniqat.LinearBn1d: nnq.Linear, + # QAT modules: + nnqat.Linear: nnq.Linear, + nnqat.Conv2d: nnq.Conv2d, + nnqat.Conv3d: nnq.Conv3d, +} + +# Default map for swapping float module to qat modules +DEFAULT_QAT_MODULE_MAPPINGS : Dict[Callable, Any] = { + nn.Conv2d: nnqat.Conv2d, + nn.Conv3d: nnqat.Conv3d, + nn.Linear: nnqat.Linear, + nn.modules.linear.NonDynamicallyQuantizableLinear: nnqat.Linear, + # Intrinsic modules: + nni.ConvBn1d: nniqat.ConvBn1d, + nni.ConvBn2d: nniqat.ConvBn2d, + nni.ConvBn3d: nniqat.ConvBn3d, + nni.ConvBnReLU1d: nniqat.ConvBnReLU1d, + nni.ConvBnReLU2d: nniqat.ConvBnReLU2d, + nni.ConvBnReLU3d: nniqat.ConvBnReLU3d, + nni.ConvReLU2d: nniqat.ConvReLU2d, + nni.ConvReLU3d: nniqat.ConvReLU3d, + nni.LinearReLU: nniqat.LinearReLU, + nni.LinearBn1d: nniqat.LinearBn1d, +} + +# Default map for swapping dynamic modules +DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = { + nn.GRUCell: nnqd.GRUCell, + nn.Linear: nnqd.Linear, + nnqatd.Linear: nnqd.Linear, + nn.modules.linear.NonDynamicallyQuantizableLinear: nnqd.Linear, + nn.LSTM: nnqd.LSTM, + nn.GRU: nnqd.GRU, + nn.LSTMCell: nnqd.LSTMCell, + nn.RNNCell: nnqd.RNNCell, + nni.LinearReLU: nniqd.LinearReLU, + nn.EmbeddingBag: nnq.EmbeddingBag, + nn.Embedding: nnq.Embedding, + # Don't want to enable these by default because the numerical + # accuracy is poor compared to other dynamic ops + # nn.Conv1d: nnqd.Conv1d, + # nn.Conv2d: nnqd.Conv2d, + # nn.Conv3d: nnqd.Conv3d, + # nn.ConvTranspose1d: nnqd.ConvTranspose1d, + # nn.ConvTranspose2d: nnqd.ConvTranspose2d, + # nn.ConvTranspose3d: nnqd.ConvTranspose3d, +} + +# Allowlist for propagating the qconfig 
+_INCLUDE_QCONFIG_PROPAGATE_LIST : Set[Callable] = { + nn.Sequential, +} + +# Default mapping from floating point function or torch ops to quantized ops +# TODO: merge with default static mapping +DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS : Dict[Union[Callable, str], Callable] = { + F.elu: torch.ops.quantized.elu, + F.hardswish: torch.ops.quantized.hardswish, + F.instance_norm: torch.ops.quantized.instance_norm, + F.layer_norm: torch.ops.quantized.layer_norm, + F.leaky_relu: torch.ops.quantized.leaky_relu, + F.dropout: torch.ops.quantized.dropout, +} + +# mapping from module to output activation post process class +DEFAULT_MODULE_TO_ACT_POST_PROCESS : Dict[Callable, Callable] = { + nn.Hardsigmoid: default_fixed_qparams_range_0to1_fake_quant, + nn.Sigmoid: default_fixed_qparams_range_0to1_fake_quant, + nn.Softmax: default_fixed_qparams_range_0to1_fake_quant, + nn.Tanh: default_fixed_qparams_range_neg1to1_fake_quant, +} + +# Default map for swapping float module to static sparse quantized ones +DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = { + nn.Linear: ao_nn.sparse.quantized.Linear +} + +# Default map for swapping float module to dynamic sparse quantized ones +DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = { + nn.Linear: ao_nn.sparse.quantized.dynamic.Linear +} + +def no_observer_set() -> Set[Any]: + r"""These modules cannot have observers inserted by default.""" + no_observers = { + nn.quantizable.LSTM, + nn.quantizable.MultiheadAttention + } + return no_observers + +def get_default_static_quant_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping for post training static quantization + ''' + return copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS) + +def get_default_static_quant_reference_module_mappings() -> Dict[Callable, Any]: + ''' Get reference module mapping for post training static quantization + ''' + return copy.deepcopy(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS) + +def get_embedding_static_quant_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping, including mapping for embedding QAT + ''' + mapping = copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS) + mapping[nnqat.EmbeddingBag] = nnq.EmbeddingBag + mapping[nnqat.Embedding] = nnq.Embedding + return mapping + +def get_default_static_sparse_quant_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping for post training static sparse quantization + ''' + return copy.deepcopy(DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS) + +def get_static_quant_module_class( + float_module_class: Callable, + additional_static_quant_mapping: Optional[Dict[Callable, Any]] = None, + is_reference: bool = False) -> Any: + r"""n Get the statically quantized module class corresponding to + the floating point module class + """ + if additional_static_quant_mapping is None: + additional_static_quant_mapping = {} + all_mappings = get_combined_dict( + DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS if is_reference + else DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, additional_static_quant_mapping) + static_quant_module_class = all_mappings.get(float_module_class, None) + assert static_quant_module_class is not None, \ + f"Floating point module class {str(float_module_class)}" + \ + " does not have a corresponding quantized module class" + return copy.deepcopy(static_quant_module_class) + +def get_dynamic_quant_module_class( + float_module_class: Callable, + additional_dynamic_quant_mapping: Optional[Dict[Callable, Any]] = None) -> Any: + r"""n Get the dynamically quantized 
module class corresponding to + the floating point module class + """ + if additional_dynamic_quant_mapping is None: + additional_dynamic_quant_mapping = {} + all_mappings = get_combined_dict(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, additional_dynamic_quant_mapping) + dynamic_quant_module_class = all_mappings.get(float_module_class, None) + assert dynamic_quant_module_class is not None, \ + f"Floating point module class {str(float_module_class)}" + \ + " does not have a corresponding quantized module class" + return copy.deepcopy(dynamic_quant_module_class) + +def get_default_qat_module_mappings() -> Dict[Callable, Any]: + ''' Get default module mapping for quantization aware training + ''' + return copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS) + +def get_embedding_qat_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping for quantization aware training + This is includes default values in addition to + enabling qat for embeddings. + ''' + mapping = copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS) + mapping[nn.EmbeddingBag] = nnqat.EmbeddingBag + mapping[nn.Embedding] = nnqat.Embedding + return mapping + +def get_default_dynamic_quant_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping for post training dynamic quantization + ''' + return DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS + +def get_default_dynamic_sparse_quant_module_mappings() -> Dict[Callable, Any]: + ''' Get module mapping for post training dynamic sparse quantization + ''' + return DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS + +def get_default_qconfig_propagation_list() -> Set[Callable]: + ''' Get the default list of module types that we'll attach qconfig + attribute to in prepare + ''' + QCONFIG_PROPAGATE_MODULE_CLASS_LIST = ( + set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) | + set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) | + set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) | + _INCLUDE_QCONFIG_PROPAGATE_LIST + ) + return copy.deepcopy(QCONFIG_PROPAGATE_MODULE_CLASS_LIST) + +def get_default_compare_output_module_list() -> Set[Callable]: + ''' Get list of module class types that we will record output + in numeric suite + ''' + NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST = ( + set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.values()) + | set(DEFAULT_QAT_MODULE_MAPPINGS.values()) + | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.values()) + | set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) + | set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) + | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) + | _INCLUDE_QCONFIG_PROPAGATE_LIST + ) + return copy.deepcopy(NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST) + +def get_default_float_to_quantized_operator_mappings( +) -> Dict[Union[Callable, str], Callable]: + return copy.deepcopy(DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS) + +# TODO: merge with get_static_quant_module_class +def get_quantized_operator(float_op: Union[Callable, str]) -> Callable: + ''' Get the quantized operator corresponding to the float operator + ''' + quantized_op = DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS.get(float_op, None) + assert quantized_op is not None, \ + f'Operator {str(float_op)} does not have corresponding quantized op' + return quantized_op + +def _get_special_act_post_process(module: torch.nn.Module) -> Optional[Callable]: + r""" Get the special activation post process for `module`, this has + higher priority than the activation post process in `qconfig` + e.g. 
+ input: torch.nn.Sigmoid + output: default_affine_fixed_qparam_fake_quant + """ + return DEFAULT_MODULE_TO_ACT_POST_PROCESS.get(type_before_parametrizations(module), None) + +def _has_special_act_post_process(module: torch.nn.Module) -> bool: + return module.training and type(module) in DEFAULT_MODULE_TO_ACT_POST_PROCESS diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..be00be0e295bb7b9e467b0a7e5cd2619c542b4ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize.py @@ -0,0 +1,682 @@ +# mypy: allow-untyped-defs +import copy +import itertools +import warnings +import inspect +import torch +import torch.nn as nn +import torch.ao.nn.quantized as nnq +from torch.ao.nn.intrinsic import _FusedModule + +from torch.ao.quantization.quantization_mappings import ( + get_default_dynamic_quant_module_mappings, + get_default_static_quant_module_mappings, + get_default_static_quant_reference_module_mappings, + get_default_qat_module_mappings, + get_default_qconfig_propagation_list, + no_observer_set, + _has_special_act_post_process, + _get_special_act_post_process, +) +from .utils import get_qparam_dict, has_no_children_ignoring_parametrizations +from torch.ao.quantization.stubs import DeQuantStub, QuantWrapper +from torch.ao.quantization.qconfig import ( + _add_module_to_qconfig_obs_ctr, + default_dynamic_qconfig, + float16_dynamic_qconfig, + float_qparams_weight_only_qconfig, + float_qparams_weight_only_qconfig_4bit, + _activation_is_memoryless) +from torch.nn.utils.parametrize import type_before_parametrizations +from torch.ao.quantization.observer import _is_activation_post_process + +# TODO remove this once BC is no longer required to avoid a SEV +from torch.ao.quantization.observer import ( # noqa: F401 + _is_activation_post_process as is_activation_post_process +) + +__all__ = [ + "get_default_custom_config_dict", + "propagate_qconfig_", + "add_quant_dequant", + "prepare", + "quantize", + "quantize_dynamic", + "prepare_qat", + "quantize_qat", + "convert", + "swap_module", +] + +_DEFAULT_CUSTOM_CONFIG_DICT = { + 'float_to_observed_custom_module_class': { + nn.LSTM: nn.quantizable.LSTM, + nn.MultiheadAttention: nn.quantizable.MultiheadAttention, + }, + 'observed_to_quantized_custom_module_class': { + nn.quantizable.LSTM: nn.quantized.LSTM, + nn.quantizable.MultiheadAttention: nn.quantized.MultiheadAttention, + } +} + +def get_default_custom_config_dict(): + r"""Defines the default custom config dict. 
+ """ + return _DEFAULT_CUSTOM_CONFIG_DICT + +def _propagate_qconfig_helper(module, qconfig_dict, + qconfig_parent=None, prefix='', prepare_custom_config_dict=None): + r"""This is a helper function for `propagate_qconfig_` + + Args: + module: input module + qconfig_dict: dictionary that maps from name of submodule to quantization + configuration + qconfig_parent: quantization config of parent module, we will fallback to + this config when there is no specified config for current + module + prefix: corresponding prefix of the current module, used as key in + qconfig_dict + prepare_custom_config_dict: dictionary for custom handling of modules + see docs for :func:`~torch.ao.quantization.prepare_fx` + + Return: + None, module is modified inplace with qconfig attached + """ + + module_qconfig = qconfig_dict.get(type_before_parametrizations(module), qconfig_parent) + module_qconfig = qconfig_dict.get(prefix, module_qconfig) + module_qconfig = getattr(module, 'qconfig', module_qconfig) + + torch.ao.quantization.qconfig._assert_valid_qconfig(module_qconfig, module) + + qconfig_with_device_check = _add_module_to_qconfig_obs_ctr(module_qconfig, module) + module.qconfig = qconfig_with_device_check + + for name, child in module.named_children(): + module_prefix = prefix + '.' + name if prefix else name + # do no not propagate qconfig to child if child is non traceable + if prepare_custom_config_dict is None or not ( + name in prepare_custom_config_dict.get("non_traceable_module_name", []) + or type(child) in prepare_custom_config_dict.get("non_traceable_module_class", []) + ): + _propagate_qconfig_helper( + child, qconfig_dict, qconfig_with_device_check, module_prefix + ) + +def propagate_qconfig_(module, qconfig_dict=None, prepare_custom_config_dict=None): + r"""Propagate qconfig through the module hierarchy and assign `qconfig` + attribute on each leaf module + + Args: + module: input module + qconfig_dict: dictionary that maps from name or type of submodule to + quantization configuration, qconfig applies to all submodules of a + given module unless qconfig for the submodules are specified (when + the submodule already has qconfig attribute) + prepare_custom_config_dict: dictionary for custom handling of modules + see docs for :func:`~torch.ao.quantization.prepare_fx` + + Return: + None, module is modified inplace with qconfig attached + """ + if qconfig_dict is None: + qconfig_dict = {} + if prepare_custom_config_dict is None: + prepare_custom_config_dict = {} + _propagate_qconfig_helper(module, qconfig_dict, prepare_custom_config_dict=prepare_custom_config_dict) + +def _observer_forward_hook(self, input, output): + r"""Forward hook that calls observer on the output + """ + return self.activation_post_process(output) + +def _observer_forward_pre_hook(self, input): + r"""Forward pre hook that calls observer on the output + """ + return self.activation_post_process(input[0]) + +def _register_activation_post_process_hook(module, pre_hook=False): + assert hasattr(module, 'activation_post_process'), \ + 'Expect activation_post_process attribute already attached to the module' + if pre_hook: + handle = module.register_forward_pre_hook( + _observer_forward_pre_hook, prepend=True + ) + else: + handle = module.register_forward_hook( + _observer_forward_hook, prepend=True + ) + + +def _add_observer_(module, qconfig_propagation_list=None, non_leaf_module_list=None, device=None, custom_module_class_mapping=None): + r"""Add observer for the leaf child of the module. 
+ + This function insert observer module to all leaf child module that + has a valid qconfig attribute. + + Args: + module: input module with qconfig attributes for all the leaf modules that we want to quantize + qconfig_propagation_list: a list of quantizable modules that will have observers added to them + if they are leaf nodes + device: parent device, if any + non_leaf_module_list: list of non-leaf modules we want to add observer + + Return: + None, module is modified inplace with added observer modules and forward_hooks + """ + if qconfig_propagation_list is None: + qconfig_propagation_list = get_default_qconfig_propagation_list() + + if custom_module_class_mapping is None: + custom_module_class_mapping = {} + + # respect device affinity when adding observers + if device is None: + devices = _get_unique_devices_(module) + assert len(devices) <= 1, ( + f"_add_observer_ only works with cpu or single-device CUDA modules, but got devices {devices}" + ) + device = next(iter(devices)) if len(devices) > 0 else None + + def get_activation_post_process(qconfig, device, special_act_post_process=None): + activation = qconfig.activation() if special_act_post_process is None else special_act_post_process() + if device is not None: + activation.to(device) + return activation + + def needs_observation(m): + return hasattr(m, 'qconfig') and m.qconfig is not None + + def insert_activation_post_process(m, special_act_post_process=None): + """ Adds an activation post process module and register + a pre or post hook that calls the module + """ + # We don't insert observer/fake_quantize for DeQuantStub + if needs_observation(m) and not isinstance(m, DeQuantStub): + # observer and hook will be gone after we swap the module + m.add_module('activation_post_process', get_activation_post_process( + m.qconfig, device, special_act_post_process)) + # Register observer as the first entry in the hook list + # All post forward hooks are preserved and will be executed after the observer before convert + _register_activation_post_process_hook(m, pre_hook=_activation_is_memoryless(m.qconfig)) + + for name, child in module.named_children(): + # TODO remove Dropout special after codebase stable + if type_before_parametrizations(child) in [nn.Dropout]: + continue + elif issubclass(type_before_parametrizations(child), (nnq.FloatFunctional, nnq.QFunctional)): + if needs_observation(child): + assert hasattr(child, "activation_post_process"), ( + f"functional class {type_before_parametrizations(child)} has no pre-defined `activation_post_process`" + ) + child.activation_post_process = get_activation_post_process(child.qconfig, device) + elif isinstance(child, _FusedModule): + # activation_post_process are now added directly to nn.Sequential/_FusedModule + if needs_observation(child): + insert_activation_post_process(child) + elif non_leaf_module_list is not None and type_before_parametrizations(child) in non_leaf_module_list: + if needs_observation(child): + insert_activation_post_process(child) + elif _has_special_act_post_process(child): + special_act_post_process = _get_special_act_post_process(child) + insert_activation_post_process(child, special_act_post_process) + elif needs_observation(child) and type_before_parametrizations(child) in custom_module_class_mapping: + observed_child = custom_module_class_mapping[type_before_parametrizations(child)].from_float(child) + setattr(module, name, observed_child) + # TODO: These are the modules that cannot be observed + # Once there are more, we should move them to a separate 
list + if custom_module_class_mapping[type_before_parametrizations(child)] not in no_observer_set(): + insert_activation_post_process(observed_child) + else: + _add_observer_(child, qconfig_propagation_list, non_leaf_module_list, device, custom_module_class_mapping) + + # Insert observers only for leaf nodes, note that this observer is for + # the output of the module, for input QuantStub will observe them + if has_no_children_ignoring_parametrizations(module) and not isinstance(module, torch.nn.Sequential) \ + and type_before_parametrizations(module) in qconfig_propagation_list: + insert_activation_post_process(module) + # This is a special case for AdaRound eager mode + # AdaRound contains weight_fake_quant to be propagated from API to convert + # leaf node check with a number of children looks naive assumption that blocks + # Adding an exception case for AdaRound + if hasattr(module, "weight_fake_quant") and not isinstance(module, torch.nn.Sequential) \ + and type_before_parametrizations(module) in qconfig_propagation_list: + insert_activation_post_process(module) + +def _get_unique_devices_(module): + return {p.device for p in module.parameters()} | \ + {p.device for p in module.buffers()} + +def add_quant_dequant(module): + r"""Wrap the leaf child module in QuantWrapper if it has a valid qconfig + Note that this function will modify the children of module inplace and it + can return a new module which wraps the input module as well. + + Args: + module: input module with qconfig attributes for all the leaf modules + that we want to quantize + + Return: + Either the inplace modified module with submodules wrapped in + `QuantWrapper` based on qconfig or a new `QuantWrapper` module which + wraps the input module, the latter case only happens when the input + module is a leaf module and we want to quantize it. + """ + if has_no_children_ignoring_parametrizations(module) and hasattr(module, 'qconfig') and module.qconfig: + return QuantWrapper(module) + + for name, child in module.named_children(): + module._modules[name] = add_quant_dequant(child) + return module + +def prepare(model, inplace=False, allow_list=None, + observer_non_leaf_module_list=None, + prepare_custom_config_dict=None): + r"""Prepares a copy of the model for quantization calibration or quantization-aware training. + + Quantization configuration should be assigned preemptively + to individual submodules in `.qconfig` attribute. + + The model will be attached with observer or fake quant modules, and qconfig + will be propagated. + + Args: + `model`: input model to be modified in-place + `inplace`: carry out model transformations in-place, the original module is mutated + `allow_list`: list of quantizable modules + `observer_non_leaf_module_list`: list of non-leaf modules we want to add observer + `prepare_custom_config_dict`: customization configuration dictionary for prepare function + + .. 
code-block:: python + + # Example of prepare_custom_config_dict: + prepare_custom_config_dict = { + # user will manually define the corresponding observed + # module class which has a from_float class method that converts + # float custom module to observed custom module + "float_to_observed_custom_module_class": { + CustomModule: ObservedCustomModule + } + } + + """ + torch._C._log_api_usage_once("quantization_api.quantize.prepare") + if prepare_custom_config_dict is None: + prepare_custom_config_dict = get_default_custom_config_dict() + custom_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {}) + + if not inplace: + model = copy.deepcopy(model) + + # TODO: remove allow_list + qconfig_propagation_list = allow_list + if allow_list is None: + qconfig_propagation_list = get_default_qconfig_propagation_list() + propagate_qconfig_(model, qconfig_dict=None) + + # sanity check common API misusage + if not any(hasattr(m, 'qconfig') and m.qconfig for m in model.modules()): + warnings.warn("None of the submodule got qconfig applied. Make sure you " + "passed correct configuration through `qconfig_dict` or " + "by assigning the `.qconfig` attribute directly on submodules") + + _add_observer_( + model, qconfig_propagation_list, observer_non_leaf_module_list, + custom_module_class_mapping=custom_module_class_mapping) + return model + +def _remove_activation_post_process(module): + # TODO: maybe we should change activation_post_process to _activation_post_process + # to prevent it from being used by user + if hasattr(module, 'activation_post_process') and \ + _is_activation_post_process(module.activation_post_process): + delattr(module, 'activation_post_process') + + # remove activation_post_process pre and post hooks + def remove_hooks(pre_hook=False): + hook_map = module._forward_pre_hooks if pre_hook else module._forward_hooks + observer_hook = _observer_forward_pre_hook if pre_hook else _observer_forward_hook + handle_ids_to_remove = set() + for handle_id, hook_fn in hook_map.items(): + if hook_fn is observer_hook: + handle_ids_to_remove.add(handle_id) + for handle_id in handle_ids_to_remove: + hook_map.pop(handle_id) + + remove_hooks(pre_hook=True) + remove_hooks(pre_hook=False) + +# TODO: rename to something more general +def _remove_qconfig(module): + r"""Clean up the qconfig left in the module so that new qconfig can be + propagated. + + Args: + module: module to be cleaned up + """ + for child in module.children(): + _remove_qconfig(child) + + if hasattr(module, "qconfig"): + del module.qconfig + + _remove_activation_post_process(module) + +def quantize(model, run_fn, run_args, mapping=None, inplace=False): + r"""Quantize the input float model with post training static quantization. + + First it will prepare the model for calibration, then it calls + `run_fn` which will run the calibration step, after that we will + convert the model to a quantized model. + + Args: + model: input float model + run_fn: a calibration function for calibrating the prepared model + run_args: positional arguments for `run_fn` + inplace: carry out model transformations in-place, the original module is mutated + mapping: correspondence between original module types and quantized counterparts + + Return: + Quantized model. 
+ """ + torch._C._log_api_usage_once("quantization_api.quantize.quantize") + if mapping is None: + mapping = get_default_static_quant_module_mappings() + if not inplace: + model = copy.deepcopy(model) + model.eval() + prepare(model, inplace=True) + run_fn(model, *run_args) + convert(model, mapping, inplace=True) + return model + +def quantize_dynamic(model, qconfig_spec=None, dtype=torch.qint8, + mapping=None, inplace=False): + r"""Converts a float model to dynamic (i.e. weights-only) quantized model. + + Replaces specified modules with dynamic weight-only quantized versions and output the quantized model. + + For simplest usage provide `dtype` argument that can be float16 or qint8. Weight-only quantization + by default is performed for layers with large weights size - i.e. Linear and RNN variants. + + Fine grained control is possible with `qconfig` and `mapping` that act similarly to `quantize()`. + If `qconfig` is provided, the `dtype` argument is ignored. + + Args: + model: input model + qconfig_spec: Either: + + - A dictionary that maps from name or type of submodule to quantization + configuration, qconfig applies to all submodules of a given + module unless qconfig for the submodules are specified (when the + submodule already has qconfig attribute). Entries in the dictionary + need to be QConfig instances. + + - A set of types and/or submodule names to apply dynamic quantization to, + in which case the `dtype` argument is used to specify the bit-width + + inplace: carry out model transformations in-place, the original module is mutated + mapping: maps type of a submodule to a type of corresponding dynamically quantized version + with which the submodule needs to be replaced + + """ + torch._C._log_api_usage_once("quantization_api.quantize.quantize_dynamic") + if qconfig_spec is None: + if dtype == torch.qint8: + qconfig_spec = { + nn.Linear : default_dynamic_qconfig, + nn.LSTM : default_dynamic_qconfig, + nn.GRU : default_dynamic_qconfig, + nn.LSTMCell : default_dynamic_qconfig, + nn.RNNCell : default_dynamic_qconfig, + nn.GRUCell : default_dynamic_qconfig, + } + elif dtype == torch.float16: + qconfig_spec = { + nn.Linear : float16_dynamic_qconfig, + nn.LSTM : float16_dynamic_qconfig, + nn.GRU : float16_dynamic_qconfig, + nn.LSTMCell : float16_dynamic_qconfig, + nn.RNNCell : float16_dynamic_qconfig, + nn.GRUCell : float16_dynamic_qconfig, + } + elif dtype == torch.quint8: + qconfig_spec = { + nn.EmbeddingBag : float_qparams_weight_only_qconfig, + nn.Embedding : float_qparams_weight_only_qconfig, + } + elif dtype == torch.quint4x2: + qconfig_spec = { + nn.EmbeddingBag : float_qparams_weight_only_qconfig_4bit, + } + else: + raise ValueError( + f"Don't know how to quantize with default settings for {dtype}. 
Provide full qconfig please") + elif isinstance(qconfig_spec, set): + if dtype is torch.qint8: + default_qconfig = default_dynamic_qconfig + elif dtype is torch.float16: + default_qconfig = float16_dynamic_qconfig + elif dtype is torch.quint8: + default_qconfig = float_qparams_weight_only_qconfig + elif dtype is torch.quint4x2: + default_qconfig = float_qparams_weight_only_qconfig_4bit + else: + raise RuntimeError('Unknown dtype specified for quantize_dynamic: ', str(dtype)) + qconfig_spec = dict(zip(qconfig_spec, itertools.repeat(default_qconfig))) + + if mapping is None: + mapping = get_default_dynamic_quant_module_mappings() + + if not inplace: + model = copy.deepcopy(model) + model.eval() + propagate_qconfig_(model, qconfig_spec) + convert(model, mapping, inplace=True) + return model + +def prepare_qat(model, mapping=None, inplace=False): + r""" + Prepares a copy of the model for quantization calibration or + quantization-aware training and converts it to quantized version. + + Quantization configuration should be assigned preemptively + to individual submodules in `.qconfig` attribute. + + Args: + model: input model to be modified in-place + mapping: dictionary that maps float modules to quantized modules to be + replaced. + inplace: carry out model transformations in-place, the original module + is mutated + """ + torch._C._log_api_usage_once("quantization_api.quantize.prepare_qat") + assert model.training, "prepare_qat only works on models in training mode" + if mapping is None: + mapping = get_default_qat_module_mappings() + + if not inplace: + model = copy.deepcopy(model) + + propagate_qconfig_(model, qconfig_dict=None) + convert(model, mapping=mapping, inplace=True, remove_qconfig=False) + prepare(model, observer_non_leaf_module_list=set(mapping.values()), inplace=True) + return model + +def quantize_qat(model, run_fn, run_args, inplace=False): + r"""Do quantization aware training and output a quantized model + + Args: + model: input model + run_fn: a function for evaluating the prepared model, can be a + function that simply runs the prepared model or a training + loop + run_args: positional arguments for `run_fn` + + Return: + Quantized model. + """ + torch._C._log_api_usage_once("quantization_api.quantize.quantize_qat") + if not inplace: + model = copy.deepcopy(model) + model.train() + prepare_qat(model, inplace=True) + run_fn(model, *run_args) + convert(model, inplace=True) + return model + +def convert( + module, mapping=None, inplace=False, remove_qconfig=True, + is_reference=False, convert_custom_config_dict=None, + use_precomputed_fake_quant=False): + r"""Converts submodules in input module to a different module according to `mapping` + by calling `from_float` method on the target module class. And remove qconfig at the + end if remove_qconfig is set to True. + + Args: + `module`: prepared and calibrated module + `mapping`: a dictionary that maps from source module type to target + module type, can be overwritten to allow swapping user defined + Modules + `inplace`: carry out model transformations in-place, the original module + is mutated + `convert_custom_config_dict`: custom configuration dictionary for convert function + `use_precomputed_fake_quant`: a flag to enable use of precomputed fake quant + + .. 
code-block:: python + + # Example of convert_custom_config_dict: + convert_custom_config_dict = { + # user will manually define the corresponding quantized + # module class which has a from_observed class method that converts + # observed custom module to quantized custom module + "observed_to_quantized_custom_module_class": { + ObservedCustomModule: QuantizedCustomModule + } + } + + """ + torch._C._log_api_usage_once("quantization_api.quantize.convert") + if not inplace: + module = copy.deepcopy(module) + _convert( + module, mapping, inplace=True, is_reference=is_reference, + convert_custom_config_dict=convert_custom_config_dict, + use_precomputed_fake_quant=use_precomputed_fake_quant) + if remove_qconfig: + _remove_qconfig(module) + return module + +def _convert( + module, mapping=None, inplace=False, + is_reference=False, convert_custom_config_dict=None, + use_precomputed_fake_quant=False): + r"""Converts submodules in input module to a different module according to `mapping` + by calling `from_float` method on the target module class + + Args: + module: input module + mapping: a dictionary that maps from source module type to target + module type, can be overwritten to allow swapping user defined + Modules + inplace: carry out model transformations in-place, the original module + is mutated + is_reference: a flag to enable quantized reference module + use_precomputed_fake_quant: a flag to enable use of precomputed fake quant + + """ + if mapping is None: + mapping = get_default_static_quant_reference_module_mappings() if is_reference \ + else get_default_static_quant_module_mappings() + if convert_custom_config_dict is None: + convert_custom_config_dict = get_default_custom_config_dict() + custom_module_class_mapping = convert_custom_config_dict.get("observed_to_quantized_custom_module_class", {}) + + if not inplace: + module = copy.deepcopy(module) + reassign = {} + for name, mod in module.named_children(): + # both fused modules and observed custom modules are + # swapped as one unit + if not isinstance(mod, _FusedModule) and \ + type_before_parametrizations(mod) not in custom_module_class_mapping: + _convert(mod, mapping, True, # inplace + is_reference, convert_custom_config_dict, + use_precomputed_fake_quant=use_precomputed_fake_quant) + reassign[name] = swap_module(mod, mapping, custom_module_class_mapping, use_precomputed_fake_quant) + + for key, value in reassign.items(): + module._modules[key] = value + + return module + +def swap_module(mod, mapping, custom_module_class_mapping, use_precomputed_fake_quant=False): + r"""Swaps the module if it has a quantized counterpart and it has an + `observer` attached. 
+ + Args: + mod: input module + mapping: a dictionary that maps from nn module to nnq module + + Return: + The corresponding quantized module of `mod` + """ + new_mod = mod + if hasattr(mod, 'qconfig') and mod.qconfig is not None: + swapped = False + if type_before_parametrizations(mod) in custom_module_class_mapping: + new_mod = custom_module_class_mapping[type_before_parametrizations(mod)].from_observed(mod) + swapped = True + elif type_before_parametrizations(mod) in mapping: + qmod = mapping[type_before_parametrizations(mod)] + if hasattr(qmod, '_IS_REFERENCE') and qmod._IS_REFERENCE: + assert mod.qconfig is not None + weight_post_process = mod.qconfig.weight() + weight_post_process(mod.weight) + weight_qparams = get_qparam_dict(weight_post_process) + new_mod = qmod.from_float(mod, weight_qparams) + else: + sig = inspect.signature(qmod.from_float) + if 'use_precomputed_fake_quant' in sig.parameters: + new_mod = qmod.from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant) + else: + new_mod = qmod.from_float(mod) + swapped = True + + if swapped: + # Preserve module's pre forward hooks. They'll be called on quantized input + for pre_hook_fn in mod._forward_pre_hooks.values(): + new_mod.register_forward_pre_hook(pre_hook_fn) + # Preserve module's post forward hooks except _observer_forward_hook + # After convert they'll work with quantized output + for hook_fn in mod._forward_hooks.values(): + if hook_fn is not _observer_forward_hook: + new_mod.register_forward_hook(hook_fn) + + # respect device affinity when swapping modules + devices = _get_unique_devices_(mod) + assert len(devices) <= 1, ( + f"swap_module only works with cpu or single-device CUDA modules, but got devices {devices}" + ) + device = next(iter(devices)) if len(devices) > 0 else None + if device: + new_mod.to(device) + return new_mod + +def _get_observer_dict(mod, target_dict, prefix=""): + r"""Traverse the modules and save all observers into dict. + This is mainly used for quantization accuracy debug + Args: + mod: the top module we want to save all observers + prefix: the prefix for the current module + target_dict: the dictionary used to save all the observers + """ + def get_prefix(prefix): + return prefix if prefix == "" else prefix + '.' 
+ + if hasattr(mod, 'activation_post_process'): + target_dict[get_prefix(prefix) + 'activation_post_process'] = mod.activation_post_process + for name, child in mod.named_children(): + module_prefix = get_prefix(prefix) + name if prefix else name + _get_observer_dict(child, target_dict, module_prefix) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_fx.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..5767a525342e4f76e8713f5b937e45066a3542f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_fx.py @@ -0,0 +1,735 @@ +from typing import Any, Dict, Optional, Tuple, Union +import warnings + +import torch +import copy +from torch.fx import GraphModule +from torch.fx.graph_module import _USER_PRESERVED_ATTRIBUTES_KEY +from .fx.tracer import QuantizationTracer +from .fx.tracer import ( # noqa: F401 + Scope, + ScopeContextManager +) +from .fx.fuse import fuse # noqa: F401 +from .fx.prepare import prepare # noqa: F401 +from .fx.convert import convert +from .backend_config import ( # noqa: F401 + BackendConfig, + get_tensorrt_backend_config, +) +from .fx.graph_module import ObservedGraphModule # noqa: F401 +from .fx.custom_config import ( + ConvertCustomConfig, + FuseCustomConfig, + PrepareCustomConfig, +) +from .fx.utils import get_custom_module_class_keys # noqa: F401 +from .fx.utils import get_skipped_module_name_and_classes +from .qconfig_mapping import QConfigMapping + +def attach_preserved_attrs_to_model( + model: Union[GraphModule, torch.nn.Module], + preserved_attrs: Dict[str, Any], +) -> None: + """ Store preserved attributes to the model.meta so that it can be preserved during deepcopy + """ + model.meta[_USER_PRESERVED_ATTRIBUTES_KEY] = copy.copy(preserved_attrs) # type: ignore[operator, index, assignment] + # set the preserved attributes in the model so that user can call + # model.attr as they do before calling fx graph mode quantization + for attr_name, attr in model.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items(): # type: ignore[index, union-attr] + setattr(model, attr_name, attr) + +def _check_is_graph_module(model: torch.nn.Module) -> None: + if not isinstance(model, GraphModule): + raise ValueError( + "input model must be a GraphModule, " + + "Got type:" + + str(type(model)) + + " Please make " + + "sure to follow the tutorials." 
+ ) + +def _attach_meta_to_node_if_not_exist(model: GraphModule) -> None: + """ Attach meta field to all nodes of the graph if it does not exist, + meta field is a field stores some meta information about the node, such + as dtype and shape information for output of the node, this only exists + if the program is captured by make_fx (used in quantize_pt2e flow), if + the program is captured by torch.fx symbolic tracing, this field may not exist, + so we add it here to avoid checking this all over the places + """ + for node in model.graph.nodes: + if not hasattr(node, "meta"): + node.meta = {} + +def _swap_ff_with_fxff(model: torch.nn.Module) -> None: + r""" Swap FloatFunctional with FXFloatFunctional + """ + modules_to_swap = [] + for name, module in model.named_children(): + if isinstance(module, torch.ao.nn.quantized.FloatFunctional): + modules_to_swap.append(name) + else: + _swap_ff_with_fxff(module) + + for name in modules_to_swap: + del model._modules[name] + model._modules[name] = torch.ao.nn.quantized.FXFloatFunctional() + + +def _fuse_fx( + model: GraphModule, + is_qat: bool, + fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Internal helper function to fuse modules in preparation for quantization + + Args: + model: GraphModule object from symbolic tracing (torch.fx.symbolic_trace) + """ + _check_is_graph_module(model) + return fuse( + model, is_qat, fuse_custom_config, backend_config) # type: ignore[operator] + +def _prepare_fx( + model: torch.nn.Module, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any]], + is_qat: bool, + example_inputs: Tuple[Any, ...], + prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None, + _equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, + is_standalone_module: bool = False, +) -> GraphModule: + r""" Internal helper function for prepare_fx + Args: + `model`, `qconfig_mapping`, `prepare_custom_config`, `_equalization_config`: + see docs for :func:`~torch.ao.quantization.prepare_fx` + `is_standalone_module`: a boolean flag indicates whether we are + quantizing a standalone module or not, a standalone module + is a submodule of the parent module that is not inlined in the +forward graph of the parent module, + the way we quantize standalone module is described in: + :func:`~torch.ao.quantization._prepare_standalone_module_fx` + """ + if prepare_custom_config is None: + prepare_custom_config = PrepareCustomConfig() + if _equalization_config is None: + _equalization_config = QConfigMapping() + + if isinstance(prepare_custom_config, dict): + warnings.warn( + "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported " + "in a future version. 
Please pass in a PrepareCustomConfig instead.", + FutureWarning, + stacklevel=3, + ) + prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config) + + # swap FloatFunctional with FXFloatFunctional + _swap_ff_with_fxff(model) + + skipped_module_names, skipped_module_classes = \ + get_skipped_module_name_and_classes(prepare_custom_config, is_standalone_module) + preserved_attr_names = prepare_custom_config.preserved_attributes + preserved_attrs = {attr: getattr(model, attr) for attr in preserved_attr_names if hasattr(model, attr)} + # symbolically trace the model + tracer = QuantizationTracer(skipped_module_names, skipped_module_classes) # type: ignore[arg-type] + graph_module = GraphModule(model, tracer.trace(model)) + _attach_meta_to_node_if_not_exist(graph_module) + + fuse_custom_config = FuseCustomConfig().set_preserved_attributes(prepare_custom_config.preserved_attributes) + graph_module = _fuse_fx( + graph_module, + is_qat, + fuse_custom_config, + backend_config) + prepared = prepare( + graph_module, + qconfig_mapping, + is_qat, + tracer.node_name_to_scope, + example_inputs=example_inputs, + prepare_custom_config=prepare_custom_config, + _equalization_config=_equalization_config, + backend_config=backend_config, + is_standalone_module=is_standalone_module, + ) # type: ignore[operator] + + attach_preserved_attrs_to_model(prepared, preserved_attrs) + return prepared + + +def _prepare_standalone_module_fx( + model: torch.nn.Module, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any]], + is_qat: bool, + example_inputs: Tuple[Any, ...], + prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" [Internal use only] Prepare a standalone module, so that it can be used when quantizing the + parent module. + standalone_module means it is a submodule that is not inlined in the parent module, + and will be quantized separately as one unit. + + How the standalone module is observed is specified by `input_quantized_idxs` and + `output_quantized_idxs` in the prepare_custom_config for the standalone module. + + Returns: + + * model(GraphModule): prepared standalone module. It has these attributes in + model.meta: + + * `standalone_module_input_quantized_idxs(List[Int])`: a list of + indexes for the graph input that is expected to be quantized, + same as input_quantized_idxs configuration provided + for the standalone module + * `standalone_module_output_quantized_idxs(List[Int])`: a list of + indexes for the graph output that is quantized, + same as output_quantized_idxs configuration provided + for the standalone module + + """ + return _prepare_fx( + model, + qconfig_mapping, + is_qat, + example_inputs, + prepare_custom_config, + backend_config=backend_config, + is_standalone_module=True, + ) + + +def fuse_fx( + model: torch.nn.Module, + fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Fuse modules like conv+bn, conv+bn+relu etc. The model must be in eval mode. + Fusion rules are defined in torch.ao.quantization.fx.fusion_pattern.py + + Args: + + * `model` (torch.nn.Module): a torch.nn.Module model + * `fuse_custom_config` (FuseCustomConfig): custom configurations for fuse_fx.
+ See :class:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig` for more details + Example:: + + from torch.ao.quantization import fuse_fx + m = Model().eval() + m = fuse_fx(m) + + """ + if fuse_custom_config is None: + fuse_custom_config = FuseCustomConfig() + + if isinstance(fuse_custom_config, dict): + warnings.warn( + "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported " + "in a future version. Please pass in a FuseCustomConfig instead.", + FutureWarning, + stacklevel=2, + ) + fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config) + + torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx") + preserved_attr_names = fuse_custom_config.preserved_attributes + preserved_attrs = {attr: getattr(model, attr) for attr in preserved_attr_names if hasattr(model, attr)} + + graph_module = torch.fx.symbolic_trace(model) + _attach_meta_to_node_if_not_exist(graph_module) + graph_module = _fuse_fx(graph_module, False, fuse_custom_config, backend_config) + + attach_preserved_attrs_to_model(graph_module, preserved_attrs) + return graph_module + +def prepare_fx( + model: torch.nn.Module, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any]], + example_inputs: Tuple[Any, ...], + prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None, + _equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Prepare a model for post training quantization + + Args: + * `model` (torch.nn.Module): torch.nn.Module model + + * `qconfig_mapping` (QConfigMapping): QConfigMapping object to configure how a model is + quantized, see :class:`~torch.ao.quantization.qconfig_mapping.QConfigMapping` + for more details + + * `example_inputs` (Tuple[Any, ...]): Example inputs for forward function of the model, + Tuple of positional args (keyword args can be passed as positional args as well) + + * `prepare_custom_config` (PrepareCustomConfig): customization configuration for quantization tool. + See :class:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig` for more details + + * `_equalization_config`: config for specifying how to perform equalization on the model + + * `backend_config` (BackendConfig): config that specifies how operators are quantized + in a backend, this includes how the operators are observed, + supported fusion patterns, how quantize/dequantize ops are + inserted, supported dtypes etc. 
See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details + + Return: + A GraphModule with observer (configured by qconfig_mapping), ready for calibration + + Example:: + + import torch + from torch.ao.quantization import get_default_qconfig_mapping + from torch.ao.quantization.quantize_fx import prepare_fx + + class Submodule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(5, 5) + def forward(self, x): + x = self.linear(x) + return x + + class M(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(5, 5) + self.sub = Submodule() + + def forward(self, x): + x = self.linear(x) + x = self.sub(x) + x + return x + + # initialize a floating point model + float_model = M().eval() + + # define calibration function + def calibrate(model, data_loader): + model.eval() + with torch.no_grad(): + for image, target in data_loader: + model(image) + + # qconfig is the configuration for how we insert observers for a particular + # operator + # qconfig = get_default_qconfig("fbgemm") + # Example of customizing qconfig: + # qconfig = torch.ao.quantization.QConfig( + # activation=MinMaxObserver.with_args(dtype=torch.qint8), + # weight=MinMaxObserver.with_args(dtype=torch.qint8)) + # `activation` and `weight` are constructors of observer module + + # qconfig_mapping is a collection of quantization configurations, user can + # set the qconfig for each operator (torch op calls, functional calls, module calls) + # in the model through qconfig_mapping + # the following call will get the qconfig_mapping that works best for models + # that target "fbgemm" backend + qconfig_mapping = get_default_qconfig_mapping("fbgemm") + + # We can customize qconfig_mapping in different ways. + # e.g. set the global qconfig, which means we will use the same qconfig for + # all operators in the model, this can be overwritten by other settings + # qconfig_mapping = QConfigMapping().set_global(qconfig) + # e.g. quantize the linear submodule with a specific qconfig + # qconfig_mapping = QConfigMapping().set_module_name("linear", qconfig) + # e.g. quantize all nn.Linear modules with a specific qconfig + # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig) + # for a more complete list, please see the docstring for :class:`torch.ao.quantization.QConfigMapping` + # argument + + # example_inputs is a tuple of inputs, that is used to infer the type of the + # outputs in the model + # currently it's not used, but please make sure model(*example_inputs) runs + example_inputs = (torch.randn(1, 3, 224, 224),) + + # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack + # e.g. backend_config = get_default_backend_config("fbgemm") + # `prepare_fx` inserts observers in the model based on qconfig_mapping and + # backend_config. 
If the configuration for an operator in qconfig_mapping + # is supported in the backend_config (meaning it's supported by the target + # hardware), we'll insert observer modules according to the qconfig_mapping, + # otherwise the configuration in qconfig_mapping will be ignored + # + # Example: + # in qconfig_mapping, user sets linear module to be quantized with quint8 for + # activation and qint8 for weight: + # qconfig = torch.ao.quantization.QConfig( + # activation=MinMaxObserver.with_args(dtype=torch.quint8), + # weight=MinMaxObserver.with_args(dtype=torch.qint8)) + # Note: current qconfig api does not support setting output observer, but + # we may extend this to support more fine-grained control in the + # future + # + # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig) + # in the backend config, the linear module also supports this configuration: + # weighted_int8_dtype_config = DTypeConfig( + # input_dtype=torch.quint8, + # output_dtype=torch.quint8, + # weight_dtype=torch.qint8, + # bias_dtype=torch.float) + + # linear_pattern_config = BackendPatternConfig(torch.nn.Linear) \ + # .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \ + # .add_dtype_config(weighted_int8_dtype_config) \ + # ... + + # backend_config = BackendConfig().set_backend_pattern_config(linear_pattern_config) + # `prepare_fx` will check that the setting requested by the user in qconfig_mapping + # is supported by the backend_config and insert observers and fake quant modules + # in the model + prepared_model = prepare_fx(float_model, qconfig_mapping, example_inputs) + # Run calibration + calibrate(prepared_model, sample_inference_data) + """ + torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_fx") + return _prepare_fx( + model, + qconfig_mapping, + False, # is_qat + example_inputs, + prepare_custom_config, + _equalization_config, + backend_config, + ) + + +def prepare_qat_fx( + model: torch.nn.Module, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any]], + example_inputs: Tuple[Any, ...], + prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Prepare a model for quantization aware training + + Args: + * `model` (torch.nn.Module): torch.nn.Module model + * `qconfig_mapping` (QConfigMapping): see :func:`~torch.ao.quantization.prepare_fx` + * `example_inputs` (Tuple[Any, ...]): see :func:`~torch.ao.quantization.prepare_fx` + * `prepare_custom_config` (PrepareCustomConfig): see :func:`~torch.ao.quantization.prepare_fx` + * `backend_config` (BackendConfig): see :func:`~torch.ao.quantization.prepare_fx` + + Return: + A GraphModule with fake quant modules (configured by qconfig_mapping and backend_config), ready for + quantization aware training + + Example:: + + import torch + from torch.ao.quantization import get_default_qat_qconfig_mapping + from torch.ao.quantization.quantize_fx import prepare_qat_fx + + class Submodule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(5, 5) + def forward(self, x): + x = self.linear(x) + return x + + class M(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(5, 5) + self.sub = Submodule() + + def forward(self, x): + x = self.linear(x) + x = self.sub(x) + x + return x + + # initialize a floating point model + float_model = M().train() + # (optional, but preferred) load the weights from pretrained model + #
float_model.load_weights(...) + + # define the training loop for quantization aware training + def train_loop(model, train_data): + model.train() + for image, target in train_data: + ... + + # qconfig is the configuration for how we insert observers for a particular + # operator + # qconfig = get_default_qconfig("fbgemm") + # Example of customizing qconfig: + # qconfig = torch.ao.quantization.QConfig( + # activation=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)), + # weight=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8))) + # `activation` and `weight` are constructors of observer module + + # qconfig_mapping is a collection of quantization configurations, user can + # set the qconfig for each operator (torch op calls, functional calls, module calls) + # in the model through qconfig_mapping + # the following call will get the qconfig_mapping that works best for models + # that target "fbgemm" backend + qconfig_mapping = get_default_qat_qconfig_mapping("fbgemm") + + # We can customize qconfig_mapping in different ways, please take a look at + # the docstring for :func:`~torch.ao.quantization.prepare_fx` for different ways + # to configure this + + # example_inputs is a tuple of inputs, that is used to infer the type of the + # outputs in the model + # currently it's not used, but please make sure model(*example_inputs) runs + example_inputs = (torch.randn(1, 3, 224, 224),) + + # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack + # e.g. backend_config = get_default_backend_config("fbgemm") + # `prepare_qat_fx` inserts observers in the model based on qconfig_mapping and + # backend_config, if the configuration for an operator in qconfig_mapping + # is supported in the backend_config (meaning it's supported by the target + # hardware), we'll insert fake_quantize modules according to the qconfig_mapping + # otherwise the configuration in qconfig_mapping will be ignored + # see :func:`~torch.ao.quantization.prepare_fx` for a detailed explanation of + # how qconfig_mapping interacts with backend_config + prepared_model = prepare_qat_fx(float_model, qconfig_mapping, example_inputs) + # Run training + train_loop(prepared_model, train_data) + + """ + torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_qat_fx") + return _prepare_fx( + model, + qconfig_mapping, + True, # is_qat + example_inputs, + prepare_custom_config, + backend_config=backend_config, + ) + + +def _convert_fx( + graph_module: GraphModule, + is_reference: bool, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, + is_standalone_module: bool = False, + _remove_qconfig: bool = True, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, + is_decomposed: bool = False, +) -> GraphModule: + """ `is_standalone_module`: see docs in :func:`~torch.ao.quantization.prepare_standalone_module_fx` + """ + if convert_custom_config is None: + convert_custom_config = ConvertCustomConfig() + + if isinstance(convert_custom_config, dict): + warnings.warn( + "Passing a convert_custom_config_dict to convert is deprecated and will not be supported " + "in a future version.
Please pass in a ConvertCustomConfig instead.", + FutureWarning, + stacklevel=3, + ) + convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config) + + _check_is_graph_module(graph_module) + preserved_attr_names = convert_custom_config.preserved_attributes + preserved_attrs = {attr: getattr(graph_module, attr) for attr in preserved_attr_names if hasattr(graph_module, attr)} + + quantized = convert( + graph_module, + is_reference, + convert_custom_config, + is_standalone_module, + _remove_qconfig_flag=_remove_qconfig, + qconfig_mapping=qconfig_mapping, + backend_config=backend_config, + is_decomposed=is_decomposed, + ) + + attach_preserved_attrs_to_model(quantized, preserved_attrs) + return quantized + + +def convert_fx( + graph_module: GraphModule, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, + _remove_qconfig: bool = True, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Convert a calibrated or trained model to a quantized model + + Args: + * `graph_module` (torch.fx.GraphModule): A prepared and calibrated/trained model (GraphModule) + + * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function. + See :class:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig` for more details + + * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert. + + * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization. + + The keys must include the ones in the qconfig_mapping passed to `prepare_fx` or `prepare_qat_fx`, + with the same values or `None`. Additional keys can be specified with values set to `None`. + + For each entry whose value is set to None, we skip quantizing that entry in the model:: + + qconfig_mapping = QConfigMapping + .set_global(qconfig_from_prepare) + .set_object_type(torch.nn.functional.add, None) # skip quantizing torch.nn.functional.add + .set_object_type(torch.nn.functional.linear, qconfig_from_prepare) + .set_module_name("foo.bar", None) # skip quantizing module "foo.bar" + + * `backend_config` (BackendConfig): A configuration for the backend which describes how + operators should be quantized in the backend, this includes quantization + mode support (static/dynamic/weight_only), dtype support (quint8/qint8 etc.), + observer placement for each operators and fused operators. + See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details + + Return: + A quantized model (torch.nn.Module) + + Example:: + + # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training + # convert_fx converts a calibrated/trained model to a quantized model for the + # target hardware, this includes converting the model first to a reference + # quantized model, and then lower the reference quantized model to a backend + # Currently, the supported backends are fbgemm (onednn), qnnpack (xnnpack) and + # they share the same set of quantized operators, so we are using the same + # lowering procedure + # + # backend_config defines the corresponding reference quantized module for + # the weighted modules in the model, e.g. nn.Linear + # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack + # e.g. 
backend_config = get_default_backend_config("fbgemm") + quantized_model = convert_fx(prepared_model) + + """ + torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_fx") + return _convert_fx( + graph_module, + is_reference=False, + convert_custom_config=convert_custom_config, + _remove_qconfig=_remove_qconfig, + qconfig_mapping=qconfig_mapping, + backend_config=backend_config, + ) + + +def convert_to_reference_fx( + graph_module: GraphModule, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, + _remove_qconfig: bool = True, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Convert a calibrated or trained model to a reference quantized model, + see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details, + reference quantized model is a standard representation of a quantized model provided + by FX Graph Mode Quantization, it can be further lowered to run on the target + hardware, like accelerators + + Args: + * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule) + + * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function. + See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert. + + * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization. + See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + * `backend_config` (BackendConfig): A configuration for the backend which describes how + operators should be quantized in the backend. See + :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + Return: + A reference quantized model (GraphModule) + + Example:: + + # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training + # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack + # e.g. 
backend_config = get_default_backend_config("fbgemm") + reference_quantized_model = convert_to_reference_fx(prepared_model) + + """ + torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_to_reference_fx") + return _convert_fx( + graph_module, + is_reference=True, + convert_custom_config=convert_custom_config, + _remove_qconfig=_remove_qconfig, + qconfig_mapping=qconfig_mapping, + backend_config=backend_config, + ) + +def _convert_to_reference_decomposed_fx( + graph_module: GraphModule, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, + qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None, + backend_config: Union[BackendConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" Convert a calibrated or trained model to a reference quantized model, with + decomposed representation for quantized Tensor + see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details, + reference quantized model is a standard representation of a quantized model provided + by FX Graph Mode Quantization, it can be further lowered to run on the target + hardware, like accelerators + + Note: this is not public API + + Args: + * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule) + + * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function. + See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert. + + * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization. + See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + * `backend_config` (BackendConfig): A configuration for the backend which describes how + operators should be quantized in the backend. See + :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details. + + Return: + A reference quantized model (GraphModule) with operators working with decomposed quantized Tensor + + Example:: + + # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training + # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack + # e.g. 
backend_config = get_default_backend_config("fbgemm") + reference_quantized_model = _convert_to_reference_decomposed_fx(prepared_model) + + """ + torch._C._log_api_usage_once("quantization_api.quantize_fx._convert_to_reference_decomposed_fx") + return _convert_fx( + graph_module, + is_reference=True, + convert_custom_config=convert_custom_config, + _remove_qconfig=False, + qconfig_mapping=qconfig_mapping, + backend_config=backend_config, + is_decomposed=True, + ) + + +def _convert_standalone_module_fx( + graph_module: GraphModule, + is_reference: bool = False, + convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None, +) -> GraphModule: + r""" [Internal use only] Convert a model produced by :func:`~torch.ao.quantization.prepare_standalone_module_fx` + and convert it to a quantized model + + Returns a quantized standalone module, whether input/output is quantized is + specified by prepare_custom_config, with + input_quantized_idxs, output_quantized_idxs, please + see docs for prepare_fx for details + """ + return _convert_fx( + graph_module, + is_reference, + convert_custom_config, + is_standalone_module=True, + ) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_jit.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..3001deb6ab9c9b03e14d41ae441b91e7111e6135 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/quantize_jit.py @@ -0,0 +1,336 @@ +# mypy: allow-untyped-defs + +import torch +from torch.ao.quantization.qconfig import QConfig +from torch.ao.quantization.quant_type import QuantType +from torch.jit._recursive import wrap_cpp_module + +__all__ = [ + "script_qconfig", + "script_qconfig_dict", + "fuse_conv_bn_jit", + "prepare_jit", + "prepare_dynamic_jit", + "convert_jit", + "convert_dynamic_jit", + "quantize_jit", + "quantize_dynamic_jit", +] + +def _check_is_script_module(model): + if not isinstance(model, torch.jit.ScriptModule): + raise ValueError('input must be a script module, got: ' + str(type(model))) + +def _check_forward_method(model): + if not model._c._has_method('forward'): + raise ValueError('input script module does not have forward method') + +def script_qconfig(qconfig): + r"""Instantiate the activation and weight observer modules and script + them, these observer module instances will be deepcopied during + prepare_jit step. + """ + return QConfig( + activation=torch.jit.script(qconfig.activation())._c, + weight=torch.jit.script(qconfig.weight())._c) + +def script_qconfig_dict(qconfig_dict): + r"""Helper function used by `prepare_jit`. + Apply `script_qconfig` for all entries in `qconfig_dict` that is + not None. + """ + return {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()} + +def fuse_conv_bn_jit(model, inplace=False): + r""" Fuse conv - bn module + Works for eval model only. 
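+
+    A minimal usage sketch (``float_model`` is a placeholder for any eval mode
+    module containing conv - bn patterns)::
+
+        import torch
+        from torch.ao.quantization.quantize_jit import fuse_conv_bn_jit
+
+        ts_model = torch.jit.script(float_model.eval())
+        fused_model = fuse_conv_bn_jit(ts_model)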
+ + Args: + model: TorchScript model from scripting or tracing + """ + torch._C._log_api_usage_once("quantization_api.quantize_jit.fuse_conv_bn_jit") + model_c = model._c + model_c = torch._C._jit_pass_fold_convbn(model_c) + if inplace: + model._reconstruct(model_c) + else: + model = wrap_cpp_module(model_c) + return model + +def _prepare_jit(model, qconfig_dict, inplace=False, quant_type=QuantType.STATIC): + _check_is_script_module(model) + _check_forward_method(model) + if not all(isinstance(x, str) for x in qconfig_dict.keys()): + raise ValueError('qconfig_dict should only contain names(str) as keys.') + scripted_qconfig_dict = script_qconfig_dict(qconfig_dict) + model = fuse_conv_bn_jit(model, inplace) + model_c = torch._C._jit_pass_insert_observers(model._c, + 'forward', + scripted_qconfig_dict, + inplace, + quant_type) + if inplace: + model._reconstruct(model_c) + else: + model = wrap_cpp_module(model_c) + return model + +def _prepare_ondevice_jit(model, qconfig_dict, method_name='forward', inplace=False, quant_type=QuantType.STATIC): + _check_is_script_module(model) + if not all(isinstance(x, str) for x in qconfig_dict.keys()): + raise ValueError('qconfig_dict should only contain names(str) as keys.') + scripted_qconfig_dict = script_qconfig_dict(qconfig_dict) + method_graph = model._c._get_method(method_name).graph + torch._C._jit_pass_inline(method_graph) + model = fuse_conv_bn_jit(model, inplace) + model_c = torch._C._jit_pass_insert_observer_method_for_ondevice_ptq(model._c, + method_name, + scripted_qconfig_dict, + inplace, + quant_type) + if inplace: + model._reconstruct(model_c) + else: + model = wrap_cpp_module(model_c) + return model + +def prepare_jit(model, qconfig_dict, inplace=False): + torch._C._log_api_usage_once("quantization_api.quantize_jit.prepare_jit") + return _prepare_jit(model, qconfig_dict, inplace, quant_type=QuantType.STATIC) + +def prepare_dynamic_jit(model, qconfig_dict, inplace=False): + torch._C._log_api_usage_once("quantization_api.quantize_jit.prepare_dynamic_jit") + return _prepare_jit(model, qconfig_dict, inplace, quant_type=QuantType.DYNAMIC) + + +def _prepare_ondevice_dynamic_jit(model, qconfig_dict, method_name='forward', inplace=False): + return _prepare_ondevice_jit(model, qconfig_dict, method_name, inplace, quant_type=QuantType.DYNAMIC) + +def _convert_jit(model, inplace=False, debug=False, quant_type=QuantType.STATIC, + preserved_attrs=None): + _check_is_script_module(model) + model.eval() + model_c = model._c + model_c = torch._C._jit_pass_insert_quant_dequant(model_c, 'forward', inplace, debug, quant_type) + if not debug: + is_xpu = all(p.device.type == 'xpu' for p in model.parameters()) + if not is_xpu: + # Moving model parameters to CPU since quantized operators + # are only supported on CPU and XPU right now + model.cpu() + if preserved_attrs is None: + preserved_attrs = [] + model_c = torch._C._jit_pass_quant_finalize(model_c, quant_type, preserved_attrs) + if inplace: + model._reconstruct(model_c) + else: + model = wrap_cpp_module(model_c) + torch._C._jit_pass_constant_propagation(model.graph) + torch._C._jit_pass_dce(model.graph) + return model + + +def _convert_ondevice_jit(model, method_name, inplace=False, debug=False, quant_type=QuantType.STATIC): + _check_is_script_module(model) + assert quant_type == QuantType.DYNAMIC, "This API, while should work for static quant, is only tested for dynamic quant." + assert not method_name.startswith("observe_"), "Pass in valid method to be quantized, e.g. 
forward" + observe_method_name = "observe_" + method_name + quantize_method_name = "quantize_" + method_name + model_c = model._c + model_c = torch._C._jit_pass_insert_quant_dequant_for_ondevice_ptq( + model._c, observe_method_name, inplace, debug, QuantType.DYNAMIC) + model_c = torch._C._jit_pass_quant_finalize_for_ondevice_ptq(model_c, QuantType.DYNAMIC, quantize_method_name) + if inplace: + model._reconstruct(model_c) + else: + model = wrap_cpp_module(model_c) + return model + +def convert_jit(model, inplace=False, debug=False, preserved_attrs=None): + torch._C._log_api_usage_once("quantization_api.quantize_jit.convert_jit") + return _convert_jit(model, inplace, debug, quant_type=QuantType.STATIC, preserved_attrs=preserved_attrs) + +def convert_dynamic_jit(model, inplace=False, debug=False, preserved_attrs=None): + torch._C._log_api_usage_once("quantization_api.quantize_jit.convert_dynamic_jit") + return _convert_jit(model, inplace, debug, quant_type=QuantType.DYNAMIC, preserved_attrs=preserved_attrs) + + +def _convert_ondevice_dynamic_jit(model, method_name, inplace=False, debug=False): + return _convert_ondevice_jit(model, method_name, inplace, debug, quant_type=QuantType.DYNAMIC) + + +def _quantize_ondevice_dynamic_jit_impl(model, qconfig_dict, method_name, inplace=False): + model = _prepare_ondevice_dynamic_jit(model, qconfig_dict, method_name, inplace) + model = _convert_ondevice_dynamic_jit(model, method_name, inplace) + return model + +def _quantize_jit(model, qconfig_dict, run_fn=None, run_args=None, inplace=False, debug=False, quant_type=QuantType.STATIC): + # Always do inplace convert because the Tensor is already + # copied in prepare_jit when inplace is False + if quant_type == QuantType.DYNAMIC: + model = prepare_dynamic_jit(model, qconfig_dict, inplace) + model = convert_dynamic_jit(model, True, debug) + else: + assert run_fn, "Must provide calibration function for post training static quantization" + assert run_args, "Must provide calibration dataset for post training static quantization" + model = prepare_jit(model, qconfig_dict, inplace) + run_fn(model, *run_args) + model = convert_jit(model, True, debug) + + torch._C._jit_pass_constant_propagation(model.graph) + torch._C._jit_pass_dce(model.graph) + return model + +def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False): + r"""Quantize the input float TorchScript model with + post training static quantization. + + First it will prepare the model for calibration, then it calls + `run_fn` which will run the calibration step, after that we will + convert the model to a quantized model. + + Args: + `model`: input float TorchScript model + `qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and + qconfig for that module as value, empty key means the qconfig will be applied + to whole model unless it's overwritten by more specific configurations, the + qconfig for each module is either found in the dictionary or fallback to + the qconfig of parent module. + + Right now qconfig_dict is the only way to configure how the model is quantized, + and it is done in the granularity of module, that is, we only support one type + of qconfig for each torch.nn.Module, and the qconfig for sub module will + override the qconfig for parent module, empty string means global configuration. 
+ `run_fn`: a calibration function for calibrating the prepared model + `run_args`: positional arguments for `run_fn` + `inplace`: carry out model transformations in-place, the original module is + mutated + `debug`: flag for producing a debug-friendly model (preserve weight attribute) + + Return: + Quantized TorchScript model. + + Example: + ```python + import torch + from torch.ao.quantization import get_default_qconfig + from torch.ao.quantization import quantize_jit + + ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input) + qconfig = get_default_qconfig('fbgemm') + def calibrate(model, data_loader): + model.eval() + with torch.no_grad(): + for image, target in data_loader: + model(image) + + quantized_model = quantize_jit( + ts_model, + {'': qconfig}, + calibrate, + [data_loader_test]) + ``` + """ + torch._C._log_api_usage_once("quantization_api.quantize_jit.quantize_jit") + return _quantize_jit(model, qconfig_dict, run_fn, run_args, inplace, debug, quant_type=QuantType.STATIC) + +def quantize_dynamic_jit(model, qconfig_dict, inplace=False, debug=False): + r"""Quantize the input float TorchScript model with + post training dynamic quantization. + Currently only qint8 quantization of torch.nn.Linear is supported. + + Args: + `model`: input float TorchScript model + `qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and + qconfig for that module as value, please see detailed + descriptions in :func:`~torch.ao.quantization.quantize_jit` + `inplace`: carry out model transformations in-place, the original module is + mutated + `debug`: flag for producing a debug-friendly model (preserve weight attribute) + + Return: + Quantized TorchScript model. + + Example: + ```python + import torch + from torch.ao.quantization import per_channel_dynamic_qconfig + from torch.ao.quantization import quantize_dynamic_jit + + ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input) + qconfig = per_channel_dynamic_qconfig + quantized_model = quantize_dynamic_jit( + ts_model, + {'': qconfig}) + ``` + """ + torch._C._log_api_usage_once("quantization_api.quantize_jit.quantize_dynamic_jit") + return _quantize_jit(model, qconfig_dict, inplace=inplace, debug=debug, quant_type=QuantType.DYNAMIC) + + +def _quantize_ondevice_dynamic_jit(model, qconfig_dict, method_name='forward', inplace=False): + r"""Prepares the input float TorchScript model with + *on-device* post training dynamic quantization. + Currently only qint8 quantization of torch.nn.Linear is supported. + + Args: + `model`: input float TorchScript model + `qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and + qconfig for that module as value, please see detailed + descriptions in :func:`~torch.ao.quantization.quantize_jit` + `method_name`: Name of the method within the model, to be prepared for quantization + `inplace`: carry out model transformations in-place, the original module is + mutated + + Return: + TorchScript model that is ready for on-device quantization. + This means that the returned + model has: + - Method is inlined. + - Model has observer modules inserted in the model. + - Model has packed params inserted in the model. However, they are empty, in that they don't + contain valid quantized weights. + - observe_ is added that observes the values to be quantized.
+ - reset_observers_ to reset observers. + - quantize_ is added to the model. + - This method extracts scales and zero points. + - Quantizes observed weights. + - Creates packed params from it and updates the attribute of the model with the new values + for the packed params. + - Resets the original fp32 weights with an empty tensor using SetAttr. + - quantized_ is added to the model. + - This method uses quantized weights and quantized linear ops instead of fp32 ops. + - This method should be used for inference post PTQ. + - Note that all methods' signatures should be the same as method_name. + + Later on device: + - Run reset_observers_ + - Run observe_ + - Run quantize_ + - Now the model can be saved and loaded later. + - Run model with quantized_ + + Example: + ```python + import torch + from torch.ao.quantization import per_channel_dynamic_qconfig + from torch.ao.quantization.quantize_jit import _quantize_ondevice_dynamic_jit + + ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input) + qconfig = per_channel_dynamic_qconfig + quant_ready_model = _quantize_ondevice_dynamic_jit( + ts_model, + {'': qconfig}, + 'forward', + True) + ``` + """ + return _quantize_ondevice_dynamic_jit_impl(model, qconfig_dict, method_name, inplace=inplace) diff --git a/parrot/lib/python3.10/site-packages/torch/ao/quantization/utils.py b/parrot/lib/python3.10/site-packages/torch/ao/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fadbf33a70b66d214955cd4c2d8a2e95ac38a90e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/ao/quantization/utils.py @@ -0,0 +1,761 @@ +# mypy: allow-untyped-defs +""" +Utils shared by different modes of quantization (eager/graph) +""" +import functools +import warnings +from collections import OrderedDict +from inspect import getfullargspec, signature +from typing import Any, Callable, Dict, Optional, Tuple, Union + +import torch +from torch.ao.quantization.quant_type import QuantType +from torch.fx import Node +from torch.nn.utils.parametrize import is_parametrized + +NodePattern = Union[Tuple[Node, Node], Tuple[Node, Tuple[Node, Node]], Any] +NodePattern.__module__ = "torch.ao.quantization.utils" + +# This is the Quantizer class instance from torch/quantization/fx/quantize.py. +# Define separately to prevent circular imports. +# TODO(future PR): improve this.
+# make this public once fixed (can't be public as is because setting the module directly +# doesn't work) +QuantizerCls = Any + +# Type for fusion patterns, it can be more complicated than the following actually, +# see pattern.md for docs +# TODO: not sure if typing supports recursive data types +Pattern = Union[ + Callable, Tuple[Callable, Callable], Tuple[Callable, Tuple[Callable, Callable]], Any +] +Pattern.__module__ = "torch.ao.quantization.utils" + +# TODO: maybe rename this to MatchInputNode +class MatchAllNode: + """ A node pattern that matches all nodes, used in defining + fusion patterns in FX Graph Mode Quantization + """ + pass + +module_type_list = { + torch.nn.ReLU, + torch.nn.ReLU6, + torch.nn.AdaptiveAvgPool1d, + torch.nn.AdaptiveAvgPool2d, + torch.nn.AdaptiveAvgPool3d, + torch.nn.AvgPool1d, + torch.nn.AvgPool2d, + torch.nn.AvgPool3d, + torch.nn.MaxPool1d, + torch.nn.MaxPool2d, + torch.nn.MaxPool3d, + torch.nn.Identity, + torch.nn.Hardsigmoid, + torch.nn.Sigmoid, + torch.nn.Tanh, +} +func_list = { + torch.nn.functional.adaptive_avg_pool1d, + torch.nn.functional.adaptive_avg_pool2d, + torch.nn.functional.adaptive_avg_pool3d, + torch.nn.functional.elu, + torch.nn.functional.hardswish, + torch.nn.functional.instance_norm, + torch.nn.functional.layer_norm, + torch.nn.functional.leaky_relu, + torch.nn.functional.silu, + torch.nn.functional.mish, + torch.nn.functional.dropout, + torch.nn.functional.max_pool1d, + torch.nn.functional.max_pool2d, + torch.nn.functional.max_pool3d, + torch.nn.functional.relu, + torch.nn.functional.hardtanh, + torch.nn.functional.hardtanh_, + torch.nn.functional.hardsigmoid, + torch.nn.functional.sigmoid, + torch.transpose, + torch.repeat_interleave, + torch.sigmoid, + torch.squeeze, + torch.stack, + torch.sum, + torch.tanh, + torch.unsqueeze, + torch.cat, +} +method_list = { + torch.mean, + 'relu', + 'relu_', + 'contiguous', + 'detach', + 'detach_', + 'hardsigmoid', + 'hardsigmoid_', + 'permute', + 'repeat', + 'repeat_interleave', + 'reshape', + 'resize_', + 'shape', + 'sigmoid', + 'sigmoid_', + 'size', + 'squeeze', + 'squeeze_', + 'tanh', + 'tanh_', + 'transpose', + 'unsqueeze', + 'unsqueeze_', + 'view', +} + +# TODO: not used now, remove +def check_node(node, modules): + # TODO: reuse is_fixed_qparam_node after we move this function to _lower_to_native_backend.py + is_call_function = node.op == "call_function" and node.target in func_list + is_call_method = node.op == "call_method" and node.target in method_list + is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list + return is_call_function, is_call_method, is_call_module + +def get_combined_dict(default_dict, additional_dict): + """ + Combines two dictionaries. + + This function takes two dictionaries as input and returns a new dictionary + that contains all the key-value pairs from both input dictionaries. + If there are any duplicate keys in the `additional_dict`, the values + from the `additional_dict` will overwrite those in the `default_dict`. 
+ Args: + default_dict (dict): The main dictionary that will be used as the base + additional_dict (dict): The dictionary used to update `default_dict` + + Returns: + dict: The resulting dictionary + Example: + >>> x = dict(a=1, b=1) + >>> y = dict(b=2, c=3) + >>> get_combined_dict(x, y) + {'a': 1, 'b': 2, 'c': 3} + """ + d = default_dict.copy() + d.update(additional_dict) + return d + +def is_per_tensor(qscheme): + return qscheme == torch.per_tensor_affine or \ + qscheme == torch.per_tensor_symmetric + +def is_per_channel(qscheme): + return qscheme in [torch.per_channel_affine, + torch.per_channel_affine_float_qparams, + torch.per_channel_symmetric] + +def getattr_from_fqn(obj: Any, fqn: str) -> Any: + """ + Given an obj and a fqn such as "foo.bar.baz", returns gm.foo.bar.baz. + """ + return functools.reduce(getattr, fqn.split("."), obj) + +def to_underlying_dtype(qdtype): + DTYPE_MAPPING = { + torch.quint8: torch.uint8, + torch.qint8: torch.int8, + torch.qint32: torch.int32, + torch.quint4x2: torch.uint8, + torch.quint2x4: torch.uint8, + torch.uint8: torch.uint8, + torch.int8: torch.int8, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.float8_e5m2: torch.float8_e5m2, + torch.float8_e4m3fn: torch.float8_e4m3fn, + } + assert qdtype in DTYPE_MAPPING, "Unsupported dtype: " + str(qdtype) + return DTYPE_MAPPING[qdtype] + +def get_qparam_dict(observer_or_fake_quant): + from torch.ao.quantization.observer import PlaceholderObserver + + qscheme = getattr(observer_or_fake_quant, "qscheme", None) + dtype = observer_or_fake_quant.dtype + qparams = {"qscheme": qscheme, "dtype": dtype} + + if not qscheme or isinstance(observer_or_fake_quant, PlaceholderObserver): + return {"qscheme": None, "dtype": dtype} + + if is_per_tensor(qscheme): + qscheme = torch.per_tensor_affine + elif is_per_channel(qscheme): + # change symmetric to affine since we do not have symmetric + # quantized Tensor + if qscheme == torch.per_channel_symmetric: + qscheme = torch.per_channel_affine + qparams["axis"] = observer_or_fake_quant.ch_axis + else: + raise RuntimeError(f"Unrecognized qscheme: {qscheme}") + # update qscheme, since we don't have symmetric quant qscheme + # in quantized Tensor + qparams["qscheme"] = qscheme + + scale, zero_point = observer_or_fake_quant.calculate_qparams() + qparams["scale"] = scale + qparams["zero_point"] = zero_point + + if hasattr(observer_or_fake_quant, "quant_min"): + qparams["quant_min"] = observer_or_fake_quant.quant_min + if hasattr(observer_or_fake_quant, "quant_max"): + qparams["quant_max"] = observer_or_fake_quant.quant_max + + return qparams + + +def get_swapped_custom_module_class(custom_module, custom_module_class_mapping, qconfig): + """ Get the observed/quantized custom module class that we need + to swap `custom_module` to + Input: + custom_module: input, can be an instance of either a float or observed custom module + custom_module_class_mapping: the float to observed or observed to quantized custom module class mapping + qconfig: qconfig configured for the custom module + + Output: + corresponding observed/quantized custom module class for input custom module instance + """ + quant_type = get_quant_type(qconfig) + class_mapping = custom_module_class_mapping.get(quant_type, {}) + assert type(custom_module) in class_mapping, "did not find corresponding observed " \ + f"module class for {type(custom_module)} in mapping: {class_mapping}" + return class_mapping[type(custom_module)] + +def activation_dtype(qconfig): + assert qconfig is not None + activation = 
qconfig.activation() + return activation.dtype + +def weight_dtype(qconfig): + assert qconfig is not None + weight = qconfig.weight() + return weight.dtype + +def activation_is_statically_quantized(qconfig): + """ Given a qconfig, decide if the activation needs to be + quantized or not, this includes quantizing to quint8, qint8 and qint32 and float16 + """ + return ( + activation_dtype(qconfig) in [ + torch.quint8, + torch.qint8, + torch.qint32, + torch.float16, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.float8_e5m2, + torch.float8_e4m3fn, + ] + and (not activation_is_dynamically_quantized(qconfig)) + ) + +def activation_is_dynamically_quantized(qconfig): + """ Given a qconfig, decide if the activation needs to be + dynamically quantized or not, this includes dynamically quantizing to + quint8, qint8 and float16 + """ + activation_dtype, _, activation_is_dynamic = \ + get_qconfig_dtypes(qconfig) + return activation_is_dynamic + +def activation_is_int8_quantized(qconfig): + """ Given a qconfig, decide if the activation needs to be + quantized to int8 or not, this includes quantizing to quint8, qint8 + """ + return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.uint8, torch.int8] + +def activation_is_int32_quantized(qconfig): + """ Given a qconfig, decide if the activation needs to be + quantized to int32 or not + """ + return activation_dtype(qconfig) in [torch.qint32, torch.int32] + +def weight_is_quantized(qconfig): + """ Given a qconfig, decide if the weight needs to be + quantized or not + """ + return weight_dtype(qconfig) in [ + torch.quint8, + torch.qint8, + torch.float16, + torch.quint4x2, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.float8_e5m2, + torch.float8_e4m3fn, + ] + +def weight_is_statically_quantized(qconfig): + """ Given a qconfig, decide if the weight needs to be statically + quantized or not + """ + return weight_dtype(qconfig) in [torch.quint8, torch.qint8, torch.uint8, torch.int8] + +def op_is_int8_dynamically_quantized(qconfig) -> bool: + """ Given a qconfig, returns True if this op is using int8 dynamic + quantization + """ + activation_dtype, weight_dtype, activation_is_dynamic = \ + get_qconfig_dtypes(qconfig) + return ( + activation_dtype in [torch.quint8, torch.uint8] and + # for now, the lines below assume fbgemm or qnnpack + weight_dtype in [torch.qint8, torch.int8] and + activation_is_dynamic + ) + +def get_qconfig_dtypes(qconfig): + r""" returns the qconfig tuple for qconfig: + (activation_dtype, weight_dtype, activation_is_dynamic) + """ + assert qconfig is not None + activation = qconfig.activation() + weight = qconfig.weight() + act_is_dynamic = getattr(activation, "is_dynamic", False) + return (activation.dtype, weight.dtype, act_is_dynamic) + +def get_quant_type(qconfig): + assert qconfig is not None + activation = qconfig.activation() + weight = qconfig.weight() + static_dtypes = [ + torch.quint8, + torch.qint8, + torch.quint4x2, + torch.qint32, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.float8_e5m2, + torch.float8_e4m3fn + ] + if weight.dtype in static_dtypes: + if hasattr(activation, 'is_dynamic') and activation.is_dynamic: + return QuantType.DYNAMIC + elif activation.dtype in static_dtypes: + return QuantType.STATIC + else: + return QuantType.WEIGHT_ONLY + + if weight.dtype == torch.float16: + if hasattr(activation, 'is_dynamic') and activation.is_dynamic: + return QuantType.DYNAMIC + elif activation.dtype == torch.float16: + return QuantType.STATIC + + raise 
Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype}),"  # noqa: TRY002
+                    f"weight({weight.dtype})")
+
+def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
+    """ Checks if the given minimum and maximum values are valid, meaning that
+    they exist and the min value is less than the max value.
+    """
+    if min_val.numel() == 0 or max_val.numel() == 0:
+        warnings.warn(
+            "must run observer before calling calculate_qparams. " +
+            "Returning default values."
+        )
+        return False
+
+    if min_val.dim() == 0 or max_val.dim() == 0:
+        if min_val == float("inf") and max_val == float("-inf"):
+            warnings.warn(
+                "must run observer before calling calculate_qparams. " +
+                "Returning default values."
+            )
+
+            return False
+
+        assert min_val <= max_val, f"min {min_val} should be less than max {max_val}"
+    else:
+        assert torch.all(
+            min_val <= max_val
+        ), f"min {min_val} should be less than max {max_val}"
+
+    return True
+
+
+def calculate_qmin_qmax(quant_min: int, quant_max: int, has_customized_qrange: bool, dtype: torch.dtype,
+                        reduce_range: bool) -> Tuple[int, int]:
+    r"""Calculates the actual qmin and qmax based on the quantization range,
+    the observer datatype, and whether the range is reduced.
+    """
+    # TODO(jerryzh): Figure out why custom quant_min/quant_max are still adjusted.
+    if has_customized_qrange:
+        # This initialization is here to resolve TorchScript compilation issues and to allow
+        # the use of refinement to decouple initial_quant_min and initial_quant_max from the quantization range.
+        # The actual values of initial_quant_min and initial_quant_max will be reset below.
+        if dtype in [torch.qint32, torch.int32]:
+            initial_quant_min, initial_quant_max = 0, 2**32 - 1
+        else:
+            initial_quant_min, initial_quant_max = 0, 255
+        # The following assignment of self.qmin and self.qmax to the local variables and the if check refine the
+        # attribute from Optional valid integers for use, based on TorchScript's requirements.
+        custom_quant_min, custom_quant_max = quant_min, quant_max
+        if custom_quant_min is not None and custom_quant_max is not None:
+            initial_quant_min, initial_quant_max = (
+                custom_quant_min,
+                custom_quant_max,
+            )
+
+        qrange_len = initial_quant_max - initial_quant_min + 1
+        if dtype in [torch.qint8, torch.int8]:
+            assert (
+                0 < qrange_len <= 256
+            ), "quantization range should be positive and not exceed the maximum bit range (=256)."
+        elif dtype in [torch.qint32, torch.int32]:
+            assert (
+                0 < qrange_len <= 2**32
+            ), "quantization range should be positive and not exceed the maximum bit range (=4294967296)."
+        if reduce_range:
+            quant_min, quant_max = quant_min // 2, quant_max // 2
+    else:
+        # Fall back onto the default 8-bit qmin and qmax calculation if a dynamic range is not used.
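+        # Illustrative summary of the defaults chosen by the branches below
+        # (derived from this dispatch, not from an external reference):
+        #   qint8 / int8:   (-128, 127), or (-64, 63) when reduce_range is set
+        #   quint8 / uint8: (0, 255),    or (0, 127)  when reduce_range is set
+        #   qint32 / int32: (-2**31, 2**31 - 1)
+        #   any other dtype (e.g. 4-bit variants): (0, 15)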
+ if dtype in [torch.qint8, torch.int8]: + if reduce_range: + quant_min, quant_max = -64, 63 + else: + quant_min, quant_max = -128, 127 + elif dtype in [torch.quint8, torch.uint8]: + if reduce_range: + quant_min, quant_max = 0, 127 + else: + quant_min, quant_max = 0, 255 + elif dtype in [torch.qint32, torch.int32]: + quant_min, quant_max = -1 * (2 ** 31), (2 ** 31) - 1 + else: + quant_min, quant_max = 0, 15 + return quant_min, quant_max + + +def _parent_name(target): + """ + Turn 'foo.bar' into ['foo', 'bar'] + """ + r = target.rsplit('.', 1) + if len(r) == 1: + return '', r[0] + else: + return r[0], r[1] + +def has_no_children_ignoring_parametrizations(module): + """ + Checks if module._modules is empty or + if module is a parametrization, checks that module._modules only has + the 'parametrizations' module + """ + if len(module._modules) == 0: + return True + elif is_parametrized(module): + return len(module._modules) == 1 and 'parametrizations' in module._modules + else: + return False + +def _get_path_of_module(root: torch.nn.Module, submodule: torch.nn.Module) -> Optional[str]: + """ Get the path (fully qualified name) of a submodule + + Example:: + + >> class M(torch.nn.Module): + def __init__(self): + self.linear = torch.nn.Linear(5, 5) + def forward(self, x): + return self.linear(x) + + >> m = M() + >> l = m.linear + >> _get_path_of_module(m, l) + "linear" + """ + for n, p in root.named_modules(): + if submodule is p: + return n + return None + +def _get_signature_locals(f: Callable, loc: Dict[str, Any]) -> Dict[str, Any]: + """ Get local keyword arguments + + Example:: + + >> def f(self, a, b=9): + pass + >> loc = {"a": 6, "c": 7} + >> _get_signature_locals(f, loc) + {"a": 6} + """ + return {k: v for k, v in loc.items() if k in signature(f).parameters} + +def _get_default_kwargs(f: Callable) -> "OrderedDict[str, Any]": + """ Get all default keyword arguments from function signature + + Example:: + + >> def f(self, a, b=9): + pass + >> _get_default_kwargs(f) + {"b": 9} + """ + kwargs = {} + for name, param in signature(f).parameters.items(): + if param.default is not param.empty: + kwargs[name] = param.default + elif param.kind is param.VAR_POSITIONAL: + kwargs[name] = () + elif param.kind is param.VAR_KEYWORD: + kwargs[name] = {} + return OrderedDict(kwargs) + +def _normalize_kwargs(func: Callable, loc: Dict[str, Any]) -> "OrderedDict[str, Any]": + """ Given a function and local function arguments, normalize the keyword + arguments by filling in default arguments from function signature + + Example:: + + >> def f(self, key1=3, key2=3): + pass + >> loc = {"key2": 6} + >> _normalize_kwargs(f, loc) + {"key1": 3, "key2": 6} + """ + default_kwargs = _get_default_kwargs(func) + local_kwargs = _get_signature_locals(func, loc) + normalized_kwargs = default_kwargs.copy() + for attr, val in local_kwargs.items(): + if attr in normalized_kwargs: + # override the default keyword arguments + normalized_kwargs[attr] = val + return normalized_kwargs + +def validate_qmin_qmax(quant_min: int, quant_max: int) -> None: + r"""Validates that the user-specified quantization range is properly initialized + and within the given bound supported by the observer dtype. + + To accommodate lower-bit quantization with respect to the existing torch.qint8 and + torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing + in a tuple of initial qmin and qmax values. 
One use case is that these customized qmin and qmax
+    values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
+    fake quantization. These estimates are compared against parameters learned through backpropagation.
+    The related literature on learning scale and zero point via backpropagation is as follows:
+
+    Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
+    Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
+    """
+    # The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted
+    # based on whether the quantization range is reduced and the datatype (signed/unsigned) used by the observer.
+    assert (
+        quant_min <= 0 <= quant_max
+    ), "User-specified quantization range must include 0."
+    assert (
+        quant_min < quant_max
+    ), "qmin must be strictly less than qmax for user-specified quantization range."
+
+
+# Functionally equivalent to '_calculate_qparams' in observer.py. Observers must be torchscriptable, however, and qscheme
+# as far as I can tell is not allowed to be passed as a parameter in torchscript functions. This makes refactoring observer
+# to use this utility a massive pain and very gross. For now I'm opting just to duplicate, as this code seems unlikely to change
+# (last update over 1 year ago) and when torchscript is fully deprecated we can refactor. TODO(jakeszwe, jerryzh168)
+def determine_qparams(
+        min_val: torch.Tensor, max_val: torch.Tensor, quant_min: int, quant_max: int,
+        dtype: torch.dtype, eps: torch.Tensor, has_customized_qrange: bool,
+        qscheme: torch.qscheme = torch.per_tensor_affine) -> Tuple[torch.Tensor, torch.Tensor]:
+    r"""Calculates the quantization parameters, given min and max
+    value tensors. Works for both per tensor and per channel cases
+
+    Args:
+        min_val: Minimum values per channel
+        max_val: Maximum values per channel
+
+    Returns:
+        scales: Scales tensor of shape (#channels,)
+        zero_points: Zero points tensor of shape (#channels,)
+    """
+    if not check_min_max_valid(min_val, max_val):
+        return torch.tensor([1.0], device=min_val.device.type), torch.tensor([0], device=min_val.device.type)
+
+    min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
+    max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
+
+    device = min_val_neg.device
+    scale = torch.ones(min_val_neg.size(), dtype=torch.double, device=device)
+    zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
+
+    if (
+        qscheme == torch.per_tensor_symmetric
+        or qscheme == torch.per_channel_symmetric
+    ):
+        max_val_pos = torch.max(-min_val_neg, max_val_pos)
+        scale = max_val_pos / (float(quant_max - quant_min) / 2)
+        scale = torch.max(scale, eps)
+        if dtype in [torch.uint8, torch.quint8]:
+            if has_customized_qrange:
+                # When a customized quantization range is used, the down-rounded midpoint of the range is chosen.
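+                # For example, a customized range of quant_min=0, quant_max=15
+                # would give a default zero_point of (0 + 15) // 2 == 7.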
+ zero_point = zero_point.new_full( + zero_point.size(), (quant_min + quant_max) // 2 + ) + else: + zero_point = zero_point.new_full(zero_point.size(), 128) + elif qscheme == torch.per_channel_affine_float_qparams: + scale = (max_val - min_val) / float(quant_max - quant_min) + scale = torch.where(scale > eps, scale, torch.ones_like(scale)) + # We use the quantize function + # xq = Round(Xf * inv_scale + zero_point), + # setting zero_point to (-1 * min *inv_scale) we get + # Xq = Round((Xf - min) * inv_scale) + zero_point = -1 * min_val / scale + else: + scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min) + scale = torch.max(scale, eps) + zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int) + zero_point = torch.clamp(zero_point, quant_min, quant_max) + + # For scalar values, cast them to Tensors of size 1 to keep the shape + # consistent with default values in FakeQuantize. + if len(scale.shape) == 0: + # TODO: switch to scale.item() after adding JIT support + scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device) + if len(zero_point.shape) == 0: + # TODO: switch to zero_point.item() after adding JIT support + zero_point = torch.tensor( + [int(zero_point)], dtype=zero_point.dtype, device=device + ) + if qscheme == torch.per_channel_affine_float_qparams: + zero_point = torch.tensor( + [float(zero_point)], dtype=zero_point.dtype, device=device + ) + + return scale.to(torch.double), zero_point.to(torch.int64) + +def _get_num_pos_args(f: Callable) -> int: + """ Get number of positional args for a function + + Example:: + + >> def f(self, key1=3, key2=3): + pass + >> _get_num_pos_args(f) + 3 + """ + return len(getfullargspec(f).args) + +def get_fqn_to_example_inputs( + model: torch.nn.Module, + example_inputs: Tuple[Any, ...] +) -> Dict[str, Tuple[Any, ...]]: + """ Given a model and its example inputs, return a dictionary from + fully qualified name of submodules to example_inputs for that submodule, + e.g. {"linear1": (tensor1,), "linear2": (tensor2,), "sub": (tensor3,), + "sub.linear1": (tensor4,), ...} + + Used to make quantizing submodules easier now that FX Graph Mode Quantization requires + example inputs. + + Also works for keyword arguments with default values, we would flatten keyword + arguments as positional arguments and fill in the missing keyword args with default + values, e.g. if we have a forward function: + def forward(self, x, key1=3, key2=3): + ... + + and we call it with self.submodule(x, key2=6) + we'll get example_inputs: (x, 3, 6) + + user can also override `key1` with positional arguments as well: + for self.submodule(x, 5, key2=6) + we'll get: (x, 5, 6) + + variable positional arguments and variable positional keyword arguments in forward + function are not supported currently, so please make sure no submodules is using + them. 
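+
+    Example (illustrative sketch; ``M`` is a hypothetical module whose only
+    submodule is ``linear``)::
+
+        >> m = M()
+        >> fqn_to_example_inputs = get_fqn_to_example_inputs(m, (torch.randn(1, 5),))
+        >> fqn_to_example_inputs["linear"]  # example inputs captured for m.linear
+        (tensor(...),)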
+ """ + root = model + fqn_to_example_inputs = {} + + def _patched_module_call(self, *args, **kwargs): + submodule_example_inputs = list(args).copy() + normalized_kwargs = _normalize_kwargs(self.forward, kwargs) + # minus 1 to skipping counting `self` + num_args = _get_num_pos_args(self.forward) - 1 + num_to_pop = num_args - len(submodule_example_inputs) + while num_to_pop and normalized_kwargs: + normalized_kwargs.popitem(last=False) + num_to_pop -= 1 + submodule_example_inputs.extend(normalized_kwargs.values()) + submodule_example_inputs_tuple = tuple(submodule_example_inputs) + fqn = _get_path_of_module(root, self) + if fqn is not None: + fqn_to_example_inputs[fqn] = submodule_example_inputs_tuple + return orig_module_call(self, *args, **kwargs) + + orig_module_call = torch.nn.Module.__call__ + torch.nn.Module.__call__ = _patched_module_call # type: ignore[method-assign] + try: + model(*example_inputs) + finally: + # restore the module call even if there is an exception + torch.nn.Module.__call__ = orig_module_call # type: ignore[method-assign] + return fqn_to_example_inputs + +def _assert_and_get_unique_device(module: torch.nn.Module) -> Any: + """ + Returns the unique device for a module, or None if no device is found. + Throws an error if multiple devices are detected. + """ + devices = {p.device for p in module.parameters()} | \ + {p.device for p in module.buffers()} + """ + As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564 + """ + if {torch.device("cpu"), torch.device("meta")} == devices: + warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.") + devices = {torch.device("cpu")} + "" + assert len(devices) <= 1, ( + "prepare only works with cpu or single-device CUDA modules, " + f"but got devices {devices}" + ) + device = next(iter(devices)) if len(devices) > 0 else None + return device + +__all__ = [ + "NodePattern", + "Pattern", + "MatchAllNode", + "check_node", + "get_combined_dict", + "is_per_tensor", + "is_per_channel", + "getattr_from_fqn", + "get_qparam_dict", + "get_swapped_custom_module_class", + "activation_dtype", + "weight_dtype", + "activation_is_statically_quantized", + "activation_is_dynamically_quantized", + "activation_is_int8_quantized", + "activation_is_int32_quantized", + "weight_is_quantized", + "weight_is_statically_quantized", + "op_is_int8_dynamically_quantized", + "get_qconfig_dtypes", + "get_quant_type", + "check_min_max_valid", + "calculate_qmin_qmax", + "has_no_children_ignoring_parametrizations", + "get_fqn_to_example_inputs", + "to_underlying_dtype", + "determine_qparams", + "validate_qmin_qmax", +] diff --git a/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23f8593c122eaa15b32d41cc6b4b7b26130f36b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7561f903bf1ff7e8ee897c4d5ca90f45227e955e2749b9c1d4340a31d465759 +size 139618 diff --git a/parrot/lib/python3.10/site-packages/torch/lib/libc10_cuda.so b/parrot/lib/python3.10/site-packages/torch/lib/libc10_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..1fb060959b61f06129ab5979917ae8615be793d0 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/lib/libc10_cuda.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c675d3fb407815ae57a8eeff0a369dea79c63b10100440e8beac29d8ab33a4c1 +size 660473 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_boosted_trees_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_boosted_trees_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..024b9616204af56de5baf22810b2a193583dc4fc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_boosted_trees_ops.py @@ -0,0 +1,2649 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +def boosted_trees_aggregate_stats(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], feature: Annotated[Any, _atypes.Int32], max_splits: int, num_buckets: int, name=None) -> Annotated[Any, _atypes.Float32]: + r"""Aggregates the summary of accumulated stats for the batch. + + The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket. + + Args: + node_ids: A `Tensor` of type `int32`. + int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. + gradients: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. + hessians: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. + feature: A `Tensor` of type `int32`. + int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]). + max_splits: An `int` that is `>= 1`. + int; the maximum number of splits possible in the whole tree. + num_buckets: An `int` that is `>= 1`. + int; equals to the maximum possible value of bucketized feature. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesAggregateStats", name, node_ids, gradients, + hessians, feature, "max_splits", max_splits, "num_buckets", + num_buckets) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_aggregate_stats_eager_fallback( + node_ids, gradients, hessians, feature, max_splits=max_splits, + num_buckets=num_buckets, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesAggregateStats", node_ids=node_ids, gradients=gradients, + hessians=hessians, feature=feature, + max_splits=max_splits, + num_buckets=num_buckets, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("max_splits", _op._get_attr_int("max_splits"), "num_buckets", + _op._get_attr_int("num_buckets")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesAggregateStats", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesAggregateStats = tf_export("raw_ops.BoostedTreesAggregateStats")(_ops.to_raw_op(boosted_trees_aggregate_stats)) + + +def boosted_trees_aggregate_stats_eager_fallback(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], feature: Annotated[Any, _atypes.Int32], max_splits: int, num_buckets: int, name, ctx) -> Annotated[Any, _atypes.Float32]: + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32) + gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) + hessians = _ops.convert_to_tensor(hessians, _dtypes.float32) + feature = _ops.convert_to_tensor(feature, _dtypes.int32) + _inputs_flat = [node_ids, gradients, hessians, feature] + _attrs = ("max_splits", max_splits, "num_buckets", num_buckets) + _result = _execute.execute(b"BoostedTreesAggregateStats", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesAggregateStats", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_bucketize(float_values: Annotated[List[Any], _atypes.Float32], bucket_boundaries: Annotated[List[Any], _atypes.Float32], name=None): + r"""Bucketize each feature based on bucket boundaries. + + An op that returns a list of float tensors, where each tensor represents the + bucketized values for a single feature. + + Args: + float_values: A list of `Tensor` objects with type `float32`. + float; List of Rank 1 Tensor each containing float values for a single feature. + bucket_boundaries: A list with the same length as `float_values` of `Tensor` objects with type `float32`. + float; List of Rank 1 Tensors each containing the bucket boundaries for a single + feature. + name: A name for the operation (optional). + + Returns: + A list with the same length as `float_values` of `Tensor` objects with type `int32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesBucketize", name, float_values, bucket_boundaries) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_bucketize_eager_fallback( + float_values, bucket_boundaries, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(float_values, (list, tuple)): + raise TypeError( + "Expected list for 'float_values' argument to " + "'boosted_trees_bucketize' Op, not %r." % float_values) + _attr_num_features = len(float_values) + if not isinstance(bucket_boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'bucket_boundaries' argument to " + "'boosted_trees_bucketize' Op, not %r." % bucket_boundaries) + if len(bucket_boundaries) != _attr_num_features: + raise ValueError( + "List argument 'bucket_boundaries' to 'boosted_trees_bucketize' Op with length %d " + "must match length %d of argument 'float_values'." % + (len(bucket_boundaries), _attr_num_features)) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesBucketize", float_values=float_values, + bucket_boundaries=bucket_boundaries, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_features", _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesBucketize", _inputs_flat, _attrs, _result) + return _result + +BoostedTreesBucketize = tf_export("raw_ops.BoostedTreesBucketize")(_ops.to_raw_op(boosted_trees_bucketize)) + + +def boosted_trees_bucketize_eager_fallback(float_values: Annotated[List[Any], _atypes.Float32], bucket_boundaries: Annotated[List[Any], _atypes.Float32], name, ctx): + if not isinstance(float_values, (list, tuple)): + raise TypeError( + "Expected list for 'float_values' argument to " + "'boosted_trees_bucketize' Op, not %r." % float_values) + _attr_num_features = len(float_values) + if not isinstance(bucket_boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'bucket_boundaries' argument to " + "'boosted_trees_bucketize' Op, not %r." % bucket_boundaries) + if len(bucket_boundaries) != _attr_num_features: + raise ValueError( + "List argument 'bucket_boundaries' to 'boosted_trees_bucketize' Op with length %d " + "must match length %d of argument 'float_values'." 
% + (len(bucket_boundaries), _attr_num_features)) + float_values = _ops.convert_n_to_tensor(float_values, _dtypes.float32) + bucket_boundaries = _ops.convert_n_to_tensor(bucket_boundaries, _dtypes.float32) + _inputs_flat = list(float_values) + list(bucket_boundaries) + _attrs = ("num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesBucketize", _attr_num_features, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesBucketize", _inputs_flat, _attrs, _result) + return _result + +_BoostedTreesCalculateBestFeatureSplitOutput = collections.namedtuple( + "BoostedTreesCalculateBestFeatureSplit", + ["node_ids", "gains", "feature_dimensions", "thresholds", "left_node_contribs", "right_node_contribs", "split_with_default_directions"]) + + +def boosted_trees_calculate_best_feature_split(node_id_range: Annotated[Any, _atypes.Int32], stats_summary: Annotated[Any, _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, split_type:str="inequality", name=None): + r"""Calculates gains for each feature and returns the best possible split information for the feature. + + The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + + It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + + In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + + The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. + + Args: + node_id_range: A `Tensor` of type `int32`. + A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + stats_summary: A `Tensor` of type `float32`. + A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. + The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. + l1: A `Tensor` of type `float32`. + l1 regularization factor on leaf weights, per instance based. + l2: A `Tensor` of type `float32`. + l2 regularization factor on leaf weights, per instance based. + tree_complexity: A `Tensor` of type `float32`. + adjustment to the gain, per leaf based. + min_node_weight: A `Tensor` of type `float32`. + minimum avg of hessians in a node before required for the node to be considered for splitting. + logits_dimension: An `int` that is `>= 1`. + The dimension of logit, i.e., number of classes. + split_type: An optional `string` from: `"inequality", "equality"`. Defaults to `"inequality"`. + A string indicating if this Op should perform inequality split or equality split. 
+ name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (node_ids, gains, feature_dimensions, thresholds, left_node_contribs, right_node_contribs, split_with_default_directions). + + node_ids: A `Tensor` of type `int32`. + gains: A `Tensor` of type `float32`. + feature_dimensions: A `Tensor` of type `int32`. + thresholds: A `Tensor` of type `int32`. + left_node_contribs: A `Tensor` of type `float32`. + right_node_contribs: A `Tensor` of type `float32`. + split_with_default_directions: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCalculateBestFeatureSplit", name, node_id_range, + stats_summary, l1, l2, tree_complexity, min_node_weight, + "logits_dimension", logits_dimension, "split_type", split_type) + _result = _BoostedTreesCalculateBestFeatureSplitOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_calculate_best_feature_split_eager_fallback( + node_id_range, stats_summary, l1, l2, tree_complexity, + min_node_weight, logits_dimension=logits_dimension, + split_type=split_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + if split_type is None: + split_type = "inequality" + split_type = _execute.make_str(split_type, "split_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCalculateBestFeatureSplit", node_id_range=node_id_range, + stats_summary=stats_summary, + l1=l1, l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension, + split_type=split_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("logits_dimension", _op._get_attr_int("logits_dimension"), + "split_type", _op.get_attr("split_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesCalculateBestFeatureSplit", _inputs_flat, _attrs, _result) + _result = _BoostedTreesCalculateBestFeatureSplitOutput._make(_result) + return _result + +BoostedTreesCalculateBestFeatureSplit = tf_export("raw_ops.BoostedTreesCalculateBestFeatureSplit")(_ops.to_raw_op(boosted_trees_calculate_best_feature_split)) + + +def boosted_trees_calculate_best_feature_split_eager_fallback(node_id_range: Annotated[Any, _atypes.Int32], stats_summary: Annotated[Any, _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, split_type: str, name, ctx): + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + if split_type is None: + split_type = "inequality" + split_type = _execute.make_str(split_type, "split_type") + node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32) + stats_summary = _ops.convert_to_tensor(stats_summary, _dtypes.float32) + l1 = _ops.convert_to_tensor(l1, _dtypes.float32) + l2 = _ops.convert_to_tensor(l2, _dtypes.float32) + tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32) + min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32) + _inputs_flat = [node_id_range, 
stats_summary, l1, l2, tree_complexity, min_node_weight] + _attrs = ("logits_dimension", logits_dimension, "split_type", split_type) + _result = _execute.execute(b"BoostedTreesCalculateBestFeatureSplit", 7, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesCalculateBestFeatureSplit", _inputs_flat, _attrs, _result) + _result = _BoostedTreesCalculateBestFeatureSplitOutput._make(_result) + return _result + +_BoostedTreesCalculateBestFeatureSplitV2Output = collections.namedtuple( + "BoostedTreesCalculateBestFeatureSplitV2", + ["node_ids", "gains", "feature_ids", "feature_dimensions", "thresholds", "left_node_contribs", "right_node_contribs", "split_with_default_directions"]) + + +def boosted_trees_calculate_best_feature_split_v2(node_id_range: Annotated[Any, _atypes.Int32], stats_summaries_list: Annotated[List[Any], _atypes.Float32], split_types: Annotated[Any, _atypes.String], candidate_feature_ids: Annotated[Any, _atypes.Int32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, name=None): + r"""Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node. + + The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + + It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + + In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + + The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. + + Args: + node_id_range: A `Tensor` of type `int32`. + A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + stats_summaries_list: A list of at least 1 `Tensor` objects with type `float32`. + A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. + The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. + split_types: A `Tensor` of type `string`. + A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature. + candidate_feature_ids: A `Tensor` of type `int32`. + Rank 1 tensor with ids for each feature. This is the real id of the feature. + l1: A `Tensor` of type `float32`. + l1 regularization factor on leaf weights, per instance based. + l2: A `Tensor` of type `float32`. + l2 regularization factor on leaf weights, per instance based. + tree_complexity: A `Tensor` of type `float32`. + adjustment to the gain, per leaf based. 
+ min_node_weight: A `Tensor` of type `float32`. + minimum avg of hessians in a node before required for the node to be considered for splitting. + logits_dimension: An `int` that is `>= 1`. + The dimension of logit, i.e., number of classes. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (node_ids, gains, feature_ids, feature_dimensions, thresholds, left_node_contribs, right_node_contribs, split_with_default_directions). + + node_ids: A `Tensor` of type `int32`. + gains: A `Tensor` of type `float32`. + feature_ids: A `Tensor` of type `int32`. + feature_dimensions: A `Tensor` of type `int32`. + thresholds: A `Tensor` of type `int32`. + left_node_contribs: A `Tensor` of type `float32`. + right_node_contribs: A `Tensor` of type `float32`. + split_with_default_directions: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCalculateBestFeatureSplitV2", name, node_id_range, + stats_summaries_list, split_types, candidate_feature_ids, l1, l2, + tree_complexity, min_node_weight, "logits_dimension", + logits_dimension) + _result = _BoostedTreesCalculateBestFeatureSplitV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_calculate_best_feature_split_v2_eager_fallback( + node_id_range, stats_summaries_list, split_types, + candidate_feature_ids, l1, l2, tree_complexity, min_node_weight, + logits_dimension=logits_dimension, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(stats_summaries_list, (list, tuple)): + raise TypeError( + "Expected list for 'stats_summaries_list' argument to " + "'boosted_trees_calculate_best_feature_split_v2' Op, not %r." 
% stats_summaries_list) + _attr_num_features = len(stats_summaries_list) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCalculateBestFeatureSplitV2", node_id_range=node_id_range, + stats_summaries_list=stats_summaries_list, + split_types=split_types, + candidate_feature_ids=candidate_feature_ids, + l1=l1, l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_features", _op._get_attr_int("num_features"), + "logits_dimension", _op._get_attr_int("logits_dimension")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesCalculateBestFeatureSplitV2", _inputs_flat, _attrs, _result) + _result = _BoostedTreesCalculateBestFeatureSplitV2Output._make(_result) + return _result + +BoostedTreesCalculateBestFeatureSplitV2 = tf_export("raw_ops.BoostedTreesCalculateBestFeatureSplitV2")(_ops.to_raw_op(boosted_trees_calculate_best_feature_split_v2)) + + +def boosted_trees_calculate_best_feature_split_v2_eager_fallback(node_id_range: Annotated[Any, _atypes.Int32], stats_summaries_list: Annotated[List[Any], _atypes.Float32], split_types: Annotated[Any, _atypes.String], candidate_feature_ids: Annotated[Any, _atypes.Int32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, name, ctx): + if not isinstance(stats_summaries_list, (list, tuple)): + raise TypeError( + "Expected list for 'stats_summaries_list' argument to " + "'boosted_trees_calculate_best_feature_split_v2' Op, not %r." 
% stats_summaries_list) + _attr_num_features = len(stats_summaries_list) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32) + stats_summaries_list = _ops.convert_n_to_tensor(stats_summaries_list, _dtypes.float32) + split_types = _ops.convert_to_tensor(split_types, _dtypes.string) + candidate_feature_ids = _ops.convert_to_tensor(candidate_feature_ids, _dtypes.int32) + l1 = _ops.convert_to_tensor(l1, _dtypes.float32) + l2 = _ops.convert_to_tensor(l2, _dtypes.float32) + tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32) + min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32) + _inputs_flat = [node_id_range] + list(stats_summaries_list) + [split_types, candidate_feature_ids, l1, l2, tree_complexity, min_node_weight] + _attrs = ("num_features", _attr_num_features, "logits_dimension", + logits_dimension) + _result = _execute.execute(b"BoostedTreesCalculateBestFeatureSplitV2", 8, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesCalculateBestFeatureSplitV2", _inputs_flat, _attrs, _result) + _result = _BoostedTreesCalculateBestFeatureSplitV2Output._make(_result) + return _result + +_BoostedTreesCalculateBestGainsPerFeatureOutput = collections.namedtuple( + "BoostedTreesCalculateBestGainsPerFeature", + ["node_ids_list", "gains_list", "thresholds_list", "left_node_contribs_list", "right_node_contribs_list"]) + + +def boosted_trees_calculate_best_gains_per_feature(node_id_range: Annotated[Any, _atypes.Int32], stats_summary_list: Annotated[List[Any], _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], max_splits: int, name=None): + r"""Calculates gains for each feature and returns the best possible split information for the feature. + + The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + + It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + + In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + + The length of output lists are all of the same length, `num_features`. + The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature. + + Args: + node_id_range: A `Tensor` of type `int32`. + A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + stats_summary_list: A list of at least 1 `Tensor` objects with type `float32`. + A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. 
The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. + l1: A `Tensor` of type `float32`. + l1 regularization factor on leaf weights, per instance based. + l2: A `Tensor` of type `float32`. + l2 regularization factor on leaf weights, per instance based. + tree_complexity: A `Tensor` of type `float32`. + adjustment to the gain, per leaf based. + min_node_weight: A `Tensor` of type `float32`. + minimum avg of hessians in a node before required for the node to be considered for splitting. + max_splits: An `int` that is `>= 1`. + the number of nodes that can be split in the whole tree. Used as a dimension of output tensors. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list). + + node_ids_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`. + gains_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`. + thresholds_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `int32`. + left_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`. + right_node_contribs_list: A list with the same length as `stats_summary_list` of `Tensor` objects with type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCalculateBestGainsPerFeature", name, node_id_range, + stats_summary_list, l1, l2, tree_complexity, min_node_weight, + "max_splits", max_splits) + _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_calculate_best_gains_per_feature_eager_fallback( + node_id_range, stats_summary_list, l1, l2, tree_complexity, + min_node_weight, max_splits=max_splits, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(stats_summary_list, (list, tuple)): + raise TypeError( + "Expected list for 'stats_summary_list' argument to " + "'boosted_trees_calculate_best_gains_per_feature' Op, not %r." 
% stats_summary_list) + _attr_num_features = len(stats_summary_list) + max_splits = _execute.make_int(max_splits, "max_splits") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCalculateBestGainsPerFeature", node_id_range=node_id_range, + stats_summary_list=stats_summary_list, + l1=l1, l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + max_splits=max_splits, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("max_splits", _op._get_attr_int("max_splits"), "num_features", + _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesCalculateBestGainsPerFeature", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_num_features]] + _result[_attr_num_features:] + _result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:] + _result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:] + _result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:] + _result = _result[:4] + [_result[4:]] + _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result) + return _result + +BoostedTreesCalculateBestGainsPerFeature = tf_export("raw_ops.BoostedTreesCalculateBestGainsPerFeature")(_ops.to_raw_op(boosted_trees_calculate_best_gains_per_feature)) + + +def boosted_trees_calculate_best_gains_per_feature_eager_fallback(node_id_range: Annotated[Any, _atypes.Int32], stats_summary_list: Annotated[List[Any], _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], max_splits: int, name, ctx): + if not isinstance(stats_summary_list, (list, tuple)): + raise TypeError( + "Expected list for 'stats_summary_list' argument to " + "'boosted_trees_calculate_best_gains_per_feature' Op, not %r." 
% stats_summary_list) + _attr_num_features = len(stats_summary_list) + max_splits = _execute.make_int(max_splits, "max_splits") + node_id_range = _ops.convert_to_tensor(node_id_range, _dtypes.int32) + stats_summary_list = _ops.convert_n_to_tensor(stats_summary_list, _dtypes.float32) + l1 = _ops.convert_to_tensor(l1, _dtypes.float32) + l2 = _ops.convert_to_tensor(l2, _dtypes.float32) + tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32) + min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32) + _inputs_flat = [node_id_range] + list(stats_summary_list) + [l1, l2, tree_complexity, min_node_weight] + _attrs = ("max_splits", max_splits, "num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesCalculateBestGainsPerFeature", + _attr_num_features + _attr_num_features + + _attr_num_features + _attr_num_features + + _attr_num_features, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesCalculateBestGainsPerFeature", _inputs_flat, _attrs, _result) + _result = [_result[:_attr_num_features]] + _result[_attr_num_features:] + _result = _result[:1] + [_result[1:1 + _attr_num_features]] + _result[1 + _attr_num_features:] + _result = _result[:2] + [_result[2:2 + _attr_num_features]] + _result[2 + _attr_num_features:] + _result = _result[:3] + [_result[3:3 + _attr_num_features]] + _result[3 + _attr_num_features:] + _result = _result[:4] + [_result[4:]] + _result = _BoostedTreesCalculateBestGainsPerFeatureOutput._make(_result) + return _result + + +def boosted_trees_center_bias(tree_ensemble_handle: Annotated[Any, _atypes.Resource], mean_gradients: Annotated[Any, _atypes.Float32], mean_hessians: Annotated[Any, _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble. + mean_gradients: A `Tensor` of type `float32`. + A tensor with shape=[logits_dimension] with mean of gradients for a first node. + mean_hessians: A `Tensor` of type `float32`. + A tensor with shape=[logits_dimension] mean of hessians for a first node. + l1: A `Tensor` of type `float32`. + l1 regularization factor on leaf weights, per instance based. + l2: A `Tensor` of type `float32`. + l2 regularization factor on leaf weights, per instance based. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCenterBias", name, tree_ensemble_handle, + mean_gradients, mean_hessians, l1, l2) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_center_bias_eager_fallback( + tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
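+  # Graph construction path: neither the eager fast path nor the eager fallback
+  # returned above, so the op is added to the current TensorFlow graph via
+  # _op_def_library._apply_op_helper and, when _execute.must_record_gradient()
+  # is true, its inputs and attrs are captured with _execute.record_gradient.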
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCenterBias", tree_ensemble_handle=tree_ensemble_handle, + mean_gradients=mean_gradients, + mean_hessians=mean_hessians, l1=l1, l2=l2, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesCenterBias", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesCenterBias = tf_export("raw_ops.BoostedTreesCenterBias")(_ops.to_raw_op(boosted_trees_center_bias)) + + +def boosted_trees_center_bias_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], mean_gradients: Annotated[Any, _atypes.Float32], mean_hessians: Annotated[Any, _atypes.Float32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], name, ctx) -> Annotated[Any, _atypes.Bool]: + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + mean_gradients = _ops.convert_to_tensor(mean_gradients, _dtypes.float32) + mean_hessians = _ops.convert_to_tensor(mean_hessians, _dtypes.float32) + l1 = _ops.convert_to_tensor(l1, _dtypes.float32) + l2 = _ops.convert_to_tensor(l2, _dtypes.float32) + _inputs_flat = [tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2] + _attrs = None + _result = _execute.execute(b"BoostedTreesCenterBias", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesCenterBias", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_create_ensemble(tree_ensemble_handle: Annotated[Any, _atypes.Resource], stamp_token: Annotated[Any, _atypes.Int64], tree_ensemble_serialized: Annotated[Any, _atypes.String], name=None): + r"""Creates a tree ensemble model and returns a handle to it. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble resource to be created. + stamp_token: A `Tensor` of type `int64`. + Token to use as the initial value of the resource stamp. + tree_ensemble_serialized: A `Tensor` of type `string`. + Serialized proto of the tree ensemble. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCreateEnsemble", name, tree_ensemble_handle, + stamp_token, tree_ensemble_serialized) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_create_ensemble_eager_fallback( + tree_ensemble_handle, stamp_token, tree_ensemble_serialized, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCreateEnsemble", tree_ensemble_handle=tree_ensemble_handle, + stamp_token=stamp_token, + tree_ensemble_serialized=tree_ensemble_serialized, + name=name) + return _op +BoostedTreesCreateEnsemble = tf_export("raw_ops.BoostedTreesCreateEnsemble")(_ops.to_raw_op(boosted_trees_create_ensemble)) + + +def boosted_trees_create_ensemble_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], stamp_token: Annotated[Any, _atypes.Int64], tree_ensemble_serialized: Annotated[Any, _atypes.String], name, ctx): + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64) + tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string) + _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized] + _attrs = None + _result = _execute.execute(b"BoostedTreesCreateEnsemble", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], epsilon: Annotated[Any, _atypes.Float32], num_streams: Annotated[Any, _atypes.Int64], max_elements:int=1099511627776, name=None): + r"""Create the Resource for Quantile Streams. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource; Handle to quantile stream resource. + epsilon: A `Tensor` of type `float32`. + float; The required approximation error of the stream resource. + num_streams: A `Tensor` of type `int64`. + int; The number of streams managed by the resource that shares the same epsilon. + max_elements: An optional `int`. Defaults to `1099511627776`. + int; The maximum number of data points that can be fed to the stream. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesCreateQuantileStreamResource", name, + quantile_stream_resource_handle, epsilon, num_streams, "max_elements", + max_elements) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_create_quantile_stream_resource_eager_fallback( + quantile_stream_resource_handle, epsilon, num_streams, + max_elements=max_elements, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if max_elements is None: + max_elements = 1099511627776 + max_elements = _execute.make_int(max_elements, "max_elements") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesCreateQuantileStreamResource", quantile_stream_resource_handle=quantile_stream_resource_handle, + epsilon=epsilon, + num_streams=num_streams, + max_elements=max_elements, + name=name) + return _op +BoostedTreesCreateQuantileStreamResource = tf_export("raw_ops.BoostedTreesCreateQuantileStreamResource")(_ops.to_raw_op(boosted_trees_create_quantile_stream_resource)) + + +def boosted_trees_create_quantile_stream_resource_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], epsilon: Annotated[Any, _atypes.Float32], num_streams: Annotated[Any, _atypes.Int64], max_elements: int, name, ctx): + if max_elements is None: + max_elements = 1099511627776 + max_elements = _execute.make_int(max_elements, "max_elements") + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + epsilon = _ops.convert_to_tensor(epsilon, _dtypes.float32) + num_streams = _ops.convert_to_tensor(num_streams, _dtypes.int64) + _inputs_flat = [quantile_stream_resource_handle, epsilon, num_streams] + _attrs = ("max_elements", max_elements) + _result = _execute.execute(b"BoostedTreesCreateQuantileStreamResource", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_deserialize_ensemble(tree_ensemble_handle: Annotated[Any, _atypes.Resource], stamp_token: Annotated[Any, _atypes.Int64], tree_ensemble_serialized: Annotated[Any, _atypes.String], name=None): + r"""Deserializes a serialized tree ensemble config and replaces current tree + + ensemble. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble. + stamp_token: A `Tensor` of type `int64`. + Token to use as the new value of the resource stamp. + tree_ensemble_serialized: A `Tensor` of type `string`. + Serialized proto of the ensemble. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesDeserializeEnsemble", name, tree_ensemble_handle, + stamp_token, tree_ensemble_serialized) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_deserialize_ensemble_eager_fallback( + tree_ensemble_handle, stamp_token, tree_ensemble_serialized, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
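+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode, `import tensorflow as tf`, and an existing ensemble `handle`): this op
+  # is the counterpart of BoostedTreesSerializeEnsemble below, so a round trip
+  # looks roughly like:
+  #   stamp, blob = tf.raw_ops.BoostedTreesSerializeEnsemble(
+  #       tree_ensemble_handle=handle)
+  #   tf.raw_ops.BoostedTreesDeserializeEnsemble(
+  #       tree_ensemble_handle=handle, stamp_token=stamp + 1,
+  #       tree_ensemble_serialized=blob)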
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesDeserializeEnsemble", tree_ensemble_handle=tree_ensemble_handle, + stamp_token=stamp_token, + tree_ensemble_serialized=tree_ensemble_serialized, + name=name) + return _op +BoostedTreesDeserializeEnsemble = tf_export("raw_ops.BoostedTreesDeserializeEnsemble")(_ops.to_raw_op(boosted_trees_deserialize_ensemble)) + + +def boosted_trees_deserialize_ensemble_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], stamp_token: Annotated[Any, _atypes.Int64], tree_ensemble_serialized: Annotated[Any, _atypes.String], name, ctx): + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + stamp_token = _ops.convert_to_tensor(stamp_token, _dtypes.int64) + tree_ensemble_serialized = _ops.convert_to_tensor(tree_ensemble_serialized, _dtypes.string) + _inputs_flat = [tree_ensemble_handle, stamp_token, tree_ensemble_serialized] + _attrs = None + _result = _execute.execute(b"BoostedTreesDeserializeEnsemble", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_ensemble_resource_handle_op(container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a handle to a BoostedTreesEnsembleResource + + Args: + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesEnsembleResourceHandleOp", name, "container", + container, "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_ensemble_resource_handle_op_eager_fallback( + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
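+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode and `import tensorflow as tf`): the handle op only returns a resource
+  # tensor; two calls with the same container/shared_name should point at the
+  # same underlying BoostedTreesEnsembleResource:
+  #   h1 = tf.raw_ops.BoostedTreesEnsembleResourceHandleOp(shared_name="shared")
+  #   h2 = tf.raw_ops.BoostedTreesEnsembleResourceHandleOp(shared_name="shared")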
+ if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesEnsembleResourceHandleOp", container=container, + shared_name=shared_name, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesEnsembleResourceHandleOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesEnsembleResourceHandleOp = tf_export("raw_ops.BoostedTreesEnsembleResourceHandleOp")(_ops.to_raw_op(boosted_trees_ensemble_resource_handle_op)) + + +def boosted_trees_ensemble_resource_handle_op_eager_fallback(container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("container", container, "shared_name", shared_name) + _result = _execute.execute(b"BoostedTreesEnsembleResourceHandleOp", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesEnsembleResourceHandleOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_example_debug_outputs(tree_ensemble_handle: Annotated[Any, _atypes.Resource], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name=None) -> Annotated[Any, _atypes.String]: + r"""Debugging/model interpretability outputs for each example. + + It traverses all the trees and computes debug metrics for individual examples, + such as getting split feature ids and logits after each split along the decision + path used to compute directional feature contributions. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + bucketized_features: A list of at least 1 `Tensor` objects with type `int32`. + A list of rank 1 Tensors containing bucket id for each + feature. + logits_dimension: An `int`. + scalar, dimension of the logits, to be used for constructing the protos in + examples_debug_outputs_serialized. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesExampleDebugOutputs", name, tree_ensemble_handle, + bucketized_features, "logits_dimension", logits_dimension) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_example_debug_outputs_eager_fallback( + tree_ensemble_handle, bucketized_features, + logits_dimension=logits_dimension, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_example_debug_outputs' Op, not %r." 
% bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesExampleDebugOutputs", tree_ensemble_handle=tree_ensemble_handle, + bucketized_features=bucketized_features, + logits_dimension=logits_dimension, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_bucketized_features", + _op._get_attr_int("num_bucketized_features"), + "logits_dimension", _op._get_attr_int("logits_dimension")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesExampleDebugOutputs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesExampleDebugOutputs = tf_export("raw_ops.BoostedTreesExampleDebugOutputs")(_ops.to_raw_op(boosted_trees_example_debug_outputs)) + + +def boosted_trees_example_debug_outputs_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name, ctx) -> Annotated[Any, _atypes.String]: + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_example_debug_outputs' Op, not %r." % bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32) + _inputs_flat = [tree_ensemble_handle] + list(bucketized_features) + _attrs = ("num_bucketized_features", _attr_num_bucketized_features, + "logits_dimension", logits_dimension) + _result = _execute.execute(b"BoostedTreesExampleDebugOutputs", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesExampleDebugOutputs", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_flush_quantile_summaries(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_features: int, name=None): + r"""Flush the quantile summaries from each quantile stream resource. + + An op that outputs a list of quantile summaries of a quantile stream resource. + Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, + max_rank) for a single feature. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource handle referring to a QuantileStreamResource. + num_features: An `int` that is `>= 0`. + name: A name for the operation (optional). + + Returns: + A list of `num_features` `Tensor` objects with type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesFlushQuantileSummaries", name, + quantile_stream_resource_handle, "num_features", num_features) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_flush_quantile_summaries_eager_fallback( + quantile_stream_resource_handle, num_features=num_features, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ num_features = _execute.make_int(num_features, "num_features") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesFlushQuantileSummaries", quantile_stream_resource_handle=quantile_stream_resource_handle, + num_features=num_features, + name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("num_features", _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesFlushQuantileSummaries", _inputs_flat, _attrs, _result) + return _result + +BoostedTreesFlushQuantileSummaries = tf_export("raw_ops.BoostedTreesFlushQuantileSummaries")(_ops.to_raw_op(boosted_trees_flush_quantile_summaries)) + + +def boosted_trees_flush_quantile_summaries_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_features: int, name, ctx): + num_features = _execute.make_int(num_features, "num_features") + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + _inputs_flat = [quantile_stream_resource_handle] + _attrs = ("num_features", num_features) + _result = _execute.execute(b"BoostedTreesFlushQuantileSummaries", + num_features, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesFlushQuantileSummaries", _inputs_flat, _attrs, _result) + return _result + +_BoostedTreesGetEnsembleStatesOutput = collections.namedtuple( + "BoostedTreesGetEnsembleStates", + ["stamp_token", "num_trees", "num_finalized_trees", "num_attempted_layers", "last_layer_nodes_range"]) + + +def boosted_trees_get_ensemble_states(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name=None): + r"""Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (stamp_token, num_trees, num_finalized_trees, num_attempted_layers, last_layer_nodes_range). + + stamp_token: A `Tensor` of type `int64`. + num_trees: A `Tensor` of type `int32`. + num_finalized_trees: A `Tensor` of type `int32`. + num_attempted_layers: A `Tensor` of type `int32`. + last_layer_nodes_range: A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesGetEnsembleStates", name, tree_ensemble_handle) + _result = _BoostedTreesGetEnsembleStatesOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_get_ensemble_states_eager_fallback( + tree_ensemble_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
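+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode, `import tensorflow as tf`, and an existing ensemble `handle`): the
+  # result is the namedtuple declared above, so fields can be read by name:
+  #   states = tf.raw_ops.BoostedTreesGetEnsembleStates(
+  #       tree_ensemble_handle=handle)
+  #   print(int(states.num_trees), int(states.num_attempted_layers))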
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesGetEnsembleStates", tree_ensemble_handle=tree_ensemble_handle, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesGetEnsembleStates", _inputs_flat, _attrs, _result) + _result = _BoostedTreesGetEnsembleStatesOutput._make(_result) + return _result + +BoostedTreesGetEnsembleStates = tf_export("raw_ops.BoostedTreesGetEnsembleStates")(_ops.to_raw_op(boosted_trees_get_ensemble_states)) + + +def boosted_trees_get_ensemble_states_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name, ctx): + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + _inputs_flat = [tree_ensemble_handle] + _attrs = None + _result = _execute.execute(b"BoostedTreesGetEnsembleStates", 5, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesGetEnsembleStates", _inputs_flat, _attrs, _result) + _result = _BoostedTreesGetEnsembleStatesOutput._make(_result) + return _result + + +def boosted_trees_make_quantile_summaries(float_values: Annotated[List[Any], _atypes.Float32], example_weights: Annotated[Any, _atypes.Float32], epsilon: Annotated[Any, _atypes.Float32], name=None): + r"""Makes the summary of quantiles for the batch. + + An op that takes a list of tensors (one tensor per feature) and outputs the + quantile summaries for each tensor. + + Args: + float_values: A list of `Tensor` objects with type `float32`. + float; List of Rank 1 Tensors each containing values for a single feature. + example_weights: A `Tensor` of type `float32`. + float; Rank 1 Tensor with weights per instance. + epsilon: A `Tensor` of type `float32`. + float; The required maximum approximation error. + name: A name for the operation (optional). + + Returns: + A list with the same length as `float_values` of `Tensor` objects with type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesMakeQuantileSummaries", name, float_values, + example_weights, epsilon) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_make_quantile_summaries_eager_fallback( + float_values, example_weights, epsilon, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(float_values, (list, tuple)): + raise TypeError( + "Expected list for 'float_values' argument to " + "'boosted_trees_make_quantile_summaries' Op, not %r." 
% float_values) + _attr_num_features = len(float_values) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesMakeQuantileSummaries", float_values=float_values, + example_weights=example_weights, + epsilon=epsilon, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_features", _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesMakeQuantileSummaries", _inputs_flat, _attrs, _result) + return _result + +BoostedTreesMakeQuantileSummaries = tf_export("raw_ops.BoostedTreesMakeQuantileSummaries")(_ops.to_raw_op(boosted_trees_make_quantile_summaries)) + + +def boosted_trees_make_quantile_summaries_eager_fallback(float_values: Annotated[List[Any], _atypes.Float32], example_weights: Annotated[Any, _atypes.Float32], epsilon: Annotated[Any, _atypes.Float32], name, ctx): + if not isinstance(float_values, (list, tuple)): + raise TypeError( + "Expected list for 'float_values' argument to " + "'boosted_trees_make_quantile_summaries' Op, not %r." % float_values) + _attr_num_features = len(float_values) + float_values = _ops.convert_n_to_tensor(float_values, _dtypes.float32) + example_weights = _ops.convert_to_tensor(example_weights, _dtypes.float32) + epsilon = _ops.convert_to_tensor(epsilon, _dtypes.float32) + _inputs_flat = list(float_values) + [example_weights, epsilon] + _attrs = ("num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesMakeQuantileSummaries", + _attr_num_features, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesMakeQuantileSummaries", _inputs_flat, _attrs, _result) + return _result + + +def boosted_trees_make_stats_summary(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], bucketized_features_list: Annotated[List[Any], _atypes.Int32], max_splits: int, num_buckets: int, name=None) -> Annotated[Any, _atypes.Float32]: + r"""Makes the summary of accumulated stats for the batch. + + The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example. + + Args: + node_ids: A `Tensor` of type `int32`. + int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. + gradients: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. + hessians: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. + bucketized_features_list: A list of at least 1 `Tensor` objects with type `int32`. + int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). + max_splits: An `int` that is `>= 1`. + int; the maximum number of splits possible in the whole tree. + num_buckets: An `int` that is `>= 1`. + int; equals to the maximum possible value of bucketized feature. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesMakeStatsSummary", name, node_ids, gradients, + hessians, bucketized_features_list, "max_splits", max_splits, + "num_buckets", num_buckets) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_make_stats_summary_eager_fallback( + node_ids, gradients, hessians, bucketized_features_list, + max_splits=max_splits, num_buckets=num_buckets, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(bucketized_features_list, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features_list' argument to " + "'boosted_trees_make_stats_summary' Op, not %r." % bucketized_features_list) + _attr_num_features = len(bucketized_features_list) + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesMakeStatsSummary", node_ids=node_ids, + gradients=gradients, + hessians=hessians, + bucketized_features_list=bucketized_features_list, + max_splits=max_splits, + num_buckets=num_buckets, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("max_splits", _op._get_attr_int("max_splits"), "num_buckets", + _op._get_attr_int("num_buckets"), "num_features", + _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesMakeStatsSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesMakeStatsSummary = tf_export("raw_ops.BoostedTreesMakeStatsSummary")(_ops.to_raw_op(boosted_trees_make_stats_summary)) + + +def boosted_trees_make_stats_summary_eager_fallback(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], bucketized_features_list: Annotated[List[Any], _atypes.Int32], max_splits: int, num_buckets: int, name, ctx) -> Annotated[Any, _atypes.Float32]: + if not isinstance(bucketized_features_list, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features_list' argument to " + "'boosted_trees_make_stats_summary' Op, not %r." 
% bucketized_features_list) + _attr_num_features = len(bucketized_features_list) + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32) + gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) + hessians = _ops.convert_to_tensor(hessians, _dtypes.float32) + bucketized_features_list = _ops.convert_n_to_tensor(bucketized_features_list, _dtypes.int32) + _inputs_flat = [node_ids, gradients, hessians] + list(bucketized_features_list) + _attrs = ("max_splits", max_splits, "num_buckets", num_buckets, + "num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesMakeStatsSummary", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesMakeStatsSummary", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_predict(tree_ensemble_handle: Annotated[Any, _atypes.Resource], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name=None) -> Annotated[Any, _atypes.Float32]: + r"""Runs multiple additive regression ensemble predictors on input instances and + + computes the logits. It is designed to be used during prediction. + It traverses all the trees and calculates the final score for each instance. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + bucketized_features: A list of at least 1 `Tensor` objects with type `int32`. + A list of rank 1 Tensors containing bucket id for each + feature. + logits_dimension: An `int`. + scalar, dimension of the logits, to be used for partial logits + shape. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesPredict", name, tree_ensemble_handle, + bucketized_features, "logits_dimension", logits_dimension) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_predict_eager_fallback( + tree_ensemble_handle, bucketized_features, + logits_dimension=logits_dimension, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_predict' Op, not %r." 
% bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesPredict", tree_ensemble_handle=tree_ensemble_handle, + bucketized_features=bucketized_features, + logits_dimension=logits_dimension, name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_bucketized_features", + _op._get_attr_int("num_bucketized_features"), + "logits_dimension", _op._get_attr_int("logits_dimension")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesPredict", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesPredict = tf_export("raw_ops.BoostedTreesPredict")(_ops.to_raw_op(boosted_trees_predict)) + + +def boosted_trees_predict_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name, ctx) -> Annotated[Any, _atypes.Float32]: + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_predict' Op, not %r." % bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32) + _inputs_flat = [tree_ensemble_handle] + list(bucketized_features) + _attrs = ("num_bucketized_features", _attr_num_bucketized_features, + "logits_dimension", logits_dimension) + _result = _execute.execute(b"BoostedTreesPredict", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesPredict", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], summaries: Annotated[List[Any], _atypes.Float32], name=None): + r"""Add the quantile summaries to each quantile stream resource. + + An op that adds a list of quantile summaries to a quantile stream resource. Each + summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) + for a single feature. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource handle referring to a QuantileStreamResource. + summaries: A list of `Tensor` objects with type `float32`. + string; List of Rank 2 Tensor each containing the summaries for a single feature. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesQuantileStreamResourceAddSummaries", name, + quantile_stream_resource_handle, summaries) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_quantile_stream_resource_add_summaries_eager_fallback( + quantile_stream_resource_handle, summaries, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
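+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode and `import tensorflow as tf`; `f0`, `f1`, `weights` and `qs_handle`
+  # are placeholder tensors created elsewhere): the summaries typically come
+  # straight from BoostedTreesMakeQuantileSummaries above, one per feature:
+  #   summaries = tf.raw_ops.BoostedTreesMakeQuantileSummaries(
+  #       float_values=[f0, f1], example_weights=weights, epsilon=0.01)
+  #   tf.raw_ops.BoostedTreesQuantileStreamResourceAddSummaries(
+  #       quantile_stream_resource_handle=qs_handle, summaries=list(summaries))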
+ if not isinstance(summaries, (list, tuple)): + raise TypeError( + "Expected list for 'summaries' argument to " + "'boosted_trees_quantile_stream_resource_add_summaries' Op, not %r." % summaries) + _attr_num_features = len(summaries) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesQuantileStreamResourceAddSummaries", quantile_stream_resource_handle=quantile_stream_resource_handle, + summaries=summaries, + name=name) + return _op +BoostedTreesQuantileStreamResourceAddSummaries = tf_export("raw_ops.BoostedTreesQuantileStreamResourceAddSummaries")(_ops.to_raw_op(boosted_trees_quantile_stream_resource_add_summaries)) + + +def boosted_trees_quantile_stream_resource_add_summaries_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], summaries: Annotated[List[Any], _atypes.Float32], name, ctx): + if not isinstance(summaries, (list, tuple)): + raise TypeError( + "Expected list for 'summaries' argument to " + "'boosted_trees_quantile_stream_resource_add_summaries' Op, not %r." % summaries) + _attr_num_features = len(summaries) + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + summaries = _ops.convert_n_to_tensor(summaries, _dtypes.float32) + _inputs_flat = [quantile_stream_resource_handle] + list(summaries) + _attrs = ("num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesQuantileStreamResourceAddSummaries", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], bucket_boundaries: Annotated[List[Any], _atypes.Float32], name=None): + r"""Deserialize bucket boundaries and ready flag into current QuantileAccumulator. + + An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource handle referring to a QuantileStreamResource. + bucket_boundaries: A list of at least 1 `Tensor` objects with type `float32`. + float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesQuantileStreamResourceDeserialize", name, + quantile_stream_resource_handle, bucket_boundaries) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_quantile_stream_resource_deserialize_eager_fallback( + quantile_stream_resource_handle, bucket_boundaries, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(bucket_boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'bucket_boundaries' argument to " + "'boosted_trees_quantile_stream_resource_deserialize' Op, not %r." 
% bucket_boundaries) + _attr_num_streams = len(bucket_boundaries) + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesQuantileStreamResourceDeserialize", quantile_stream_resource_handle=quantile_stream_resource_handle, + bucket_boundaries=bucket_boundaries, + name=name) + return _op +BoostedTreesQuantileStreamResourceDeserialize = tf_export("raw_ops.BoostedTreesQuantileStreamResourceDeserialize")(_ops.to_raw_op(boosted_trees_quantile_stream_resource_deserialize)) + + +def boosted_trees_quantile_stream_resource_deserialize_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], bucket_boundaries: Annotated[List[Any], _atypes.Float32], name, ctx): + if not isinstance(bucket_boundaries, (list, tuple)): + raise TypeError( + "Expected list for 'bucket_boundaries' argument to " + "'boosted_trees_quantile_stream_resource_deserialize' Op, not %r." % bucket_boundaries) + _attr_num_streams = len(bucket_boundaries) + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + bucket_boundaries = _ops.convert_n_to_tensor(bucket_boundaries, _dtypes.float32) + _inputs_flat = [quantile_stream_resource_handle] + list(bucket_boundaries) + _attrs = ("num_streams", _attr_num_streams) + _result = _execute.execute(b"BoostedTreesQuantileStreamResourceDeserialize", + 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_buckets: Annotated[Any, _atypes.Int64], generate_quantiles:bool=False, name=None): + r"""Flush the summaries for a quantile stream resource. + + An op that flushes the summaries for a quantile stream resource. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource handle referring to a QuantileStreamResource. + num_buckets: A `Tensor` of type `int64`. + int; approximate number of buckets unless using generate_quantiles. + generate_quantiles: An optional `bool`. Defaults to `False`. + bool; If True, the output will be the num_quantiles for each stream where the ith + entry is the ith quantile of the input with an approximation error of epsilon. + Duplicate values may be present. + If False, the output will be the points in the histogram that we got which roughly + translates to 1/epsilon boundaries and without any duplicates. + Default to False. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesQuantileStreamResourceFlush", name, + quantile_stream_resource_handle, num_buckets, "generate_quantiles", + generate_quantiles) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_quantile_stream_resource_flush_eager_fallback( + quantile_stream_resource_handle, num_buckets, + generate_quantiles=generate_quantiles, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
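+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode, `import tensorflow as tf`, and a `qs_handle` that has already
+  # accumulated summaries): flushing fixes the bucket boundaries:
+  #   tf.raw_ops.BoostedTreesQuantileStreamResourceFlush(
+  #       quantile_stream_resource_handle=qs_handle, num_buckets=64,
+  #       generate_quantiles=False)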
+ if generate_quantiles is None: + generate_quantiles = False + generate_quantiles = _execute.make_bool(generate_quantiles, "generate_quantiles") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesQuantileStreamResourceFlush", quantile_stream_resource_handle=quantile_stream_resource_handle, + num_buckets=num_buckets, + generate_quantiles=generate_quantiles, + name=name) + return _op +BoostedTreesQuantileStreamResourceFlush = tf_export("raw_ops.BoostedTreesQuantileStreamResourceFlush")(_ops.to_raw_op(boosted_trees_quantile_stream_resource_flush)) + + +def boosted_trees_quantile_stream_resource_flush_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_buckets: Annotated[Any, _atypes.Int64], generate_quantiles: bool, name, ctx): + if generate_quantiles is None: + generate_quantiles = False + generate_quantiles = _execute.make_bool(generate_quantiles, "generate_quantiles") + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + num_buckets = _ops.convert_to_tensor(num_buckets, _dtypes.int64) + _inputs_flat = [quantile_stream_resource_handle, num_buckets] + _attrs = ("generate_quantiles", generate_quantiles) + _result = _execute.execute(b"BoostedTreesQuantileStreamResourceFlush", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_features: int, name=None): + r"""Generate the bucket boundaries for each feature based on accumulated summaries. + + An op that returns a list of float tensors for a quantile stream resource. Each + tensor is Rank 1 containing bucket boundaries for a single feature. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource handle referring to a QuantileStreamResource. + num_features: An `int` that is `>= 0`. + inferred int; number of features to get bucket boundaries for. + name: A name for the operation (optional). + + Returns: + A list of `num_features` `Tensor` objects with type `float32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesQuantileStreamResourceGetBucketBoundaries", name, + quantile_stream_resource_handle, "num_features", num_features) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager_fallback( + quantile_stream_resource_handle, num_features=num_features, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
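+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode, `import tensorflow as tf`, and a flushed `qs_handle`); `num_features`
+  # should match the number of streams the resource was created with:
+  #   boundaries = tf.raw_ops.BoostedTreesQuantileStreamResourceGetBucketBoundaries(
+  #       quantile_stream_resource_handle=qs_handle, num_features=2)
+  #   # `boundaries` is then a list of two rank-1 float32 tensors.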
+ num_features = _execute.make_int(num_features, "num_features") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesQuantileStreamResourceGetBucketBoundaries", quantile_stream_resource_handle=quantile_stream_resource_handle, + num_features=num_features, + name=name) + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("num_features", _op._get_attr_int("num_features")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesQuantileStreamResourceGetBucketBoundaries", _inputs_flat, _attrs, _result) + return _result + +BoostedTreesQuantileStreamResourceGetBucketBoundaries = tf_export("raw_ops.BoostedTreesQuantileStreamResourceGetBucketBoundaries")(_ops.to_raw_op(boosted_trees_quantile_stream_resource_get_bucket_boundaries)) + + +def boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], num_features: int, name, ctx): + num_features = _execute.make_int(num_features, "num_features") + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + _inputs_flat = [quantile_stream_resource_handle] + _attrs = ("num_features", num_features) + _result = _execute.execute(b"BoostedTreesQuantileStreamResourceGetBucketBoundaries", + num_features, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesQuantileStreamResourceGetBucketBoundaries", _inputs_flat, _attrs, _result) + return _result + + +def boosted_trees_quantile_stream_resource_handle_op(container:str="", shared_name:str="", name=None) -> Annotated[Any, _atypes.Resource]: + r"""Creates a handle to a BoostedTreesQuantileStreamResource. + + Args: + container: An optional `string`. Defaults to `""`. + shared_name: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesQuantileStreamResourceHandleOp", name, "container", + container, "shared_name", shared_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_quantile_stream_resource_handle_op_eager_fallback( + container=container, shared_name=shared_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesQuantileStreamResourceHandleOp", container=container, + shared_name=shared_name, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("container", _op.get_attr("container"), "shared_name", + _op.get_attr("shared_name")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesQuantileStreamResourceHandleOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +BoostedTreesQuantileStreamResourceHandleOp = tf_export("raw_ops.BoostedTreesQuantileStreamResourceHandleOp")(_ops.to_raw_op(boosted_trees_quantile_stream_resource_handle_op)) + + +def boosted_trees_quantile_stream_resource_handle_op_eager_fallback(container: str, shared_name: str, name, ctx) -> Annotated[Any, _atypes.Resource]: + if container is None: + container = "" + container = _execute.make_str(container, "container") + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + _inputs_flat = [] + _attrs = ("container", container, "shared_name", shared_name) + _result = _execute.execute(b"BoostedTreesQuantileStreamResourceHandleOp", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesQuantileStreamResourceHandleOp", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_BoostedTreesSerializeEnsembleOutput = collections.namedtuple( + "BoostedTreesSerializeEnsemble", + ["stamp_token", "tree_ensemble_serialized"]) + + +def boosted_trees_serialize_ensemble(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name=None): + r"""Serializes the tree ensemble to a proto. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (stamp_token, tree_ensemble_serialized). + + stamp_token: A `Tensor` of type `int64`. + tree_ensemble_serialized: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesSerializeEnsemble", name, tree_ensemble_handle) + _result = _BoostedTreesSerializeEnsembleOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_serialize_ensemble_eager_fallback( + tree_ensemble_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
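+  # Illustrative usage sketch, not part of the generated wrapper (assumes eager
+  # mode, `import tensorflow as tf`, and an existing ensemble `handle`): the two
+  # outputs are exactly what BoostedTreesDeserializeEnsemble above takes back:
+  #   serialized = tf.raw_ops.BoostedTreesSerializeEnsemble(
+  #       tree_ensemble_handle=handle)
+  #   stamp, blob = serialized.stamp_token, serialized.tree_ensemble_serialized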
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesSerializeEnsemble", tree_ensemble_handle=tree_ensemble_handle, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesSerializeEnsemble", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSerializeEnsembleOutput._make(_result) + return _result + +BoostedTreesSerializeEnsemble = tf_export("raw_ops.BoostedTreesSerializeEnsemble")(_ops.to_raw_op(boosted_trees_serialize_ensemble)) + + +def boosted_trees_serialize_ensemble_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name, ctx): + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + _inputs_flat = [tree_ensemble_handle] + _attrs = None + _result = _execute.execute(b"BoostedTreesSerializeEnsemble", 2, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesSerializeEnsemble", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSerializeEnsembleOutput._make(_result) + return _result + +_BoostedTreesSparseAggregateStatsOutput = collections.namedtuple( + "BoostedTreesSparseAggregateStats", + ["stats_summary_indices", "stats_summary_values", "stats_summary_shape"]) + + +def boosted_trees_sparse_aggregate_stats(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], feature_indices: Annotated[Any, _atypes.Int32], feature_values: Annotated[Any, _atypes.Int32], feature_shape: Annotated[Any, _atypes.Int32], max_splits: int, num_buckets: int, name=None): + r"""Aggregates the summary of accumulated stats for the batch. + + The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id. + + Args: + node_ids: A `Tensor` of type `int32`. + int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. + gradients: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. + hessians: A `Tensor` of type `float32`. + float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. + feature_indices: A `Tensor` of type `int32`. + int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). + Number of sparse entries across all instances from the batch. The first value is + the index of the instance, the second is dimension of the feature. The second axis + can only have 2 values, i.e., the input dense version of Tensor can only be matrix. + feature_values: A `Tensor` of type `int32`. + int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). + Number of sparse entries across all instances from the batch. The first value is + the index of the instance, the second is dimension of the feature. + feature_shape: A `Tensor` of type `int32`. + int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). + The first axis can only have 2 values, [batch_size, feature_dimension]. + max_splits: An `int` that is `>= 1`. + int; the maximum number of splits possible in the whole tree. + num_buckets: An `int` that is `>= 1`. + int; equals to the maximum possible value of bucketized feature + 1. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (stats_summary_indices, stats_summary_values, stats_summary_shape). 
+ + stats_summary_indices: A `Tensor` of type `int32`. + stats_summary_values: A `Tensor` of type `float32`. + stats_summary_shape: A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesSparseAggregateStats", name, node_ids, gradients, + hessians, feature_indices, feature_values, feature_shape, + "max_splits", max_splits, "num_buckets", num_buckets) + _result = _BoostedTreesSparseAggregateStatsOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_sparse_aggregate_stats_eager_fallback( + node_ids, gradients, hessians, feature_indices, feature_values, + feature_shape, max_splits=max_splits, num_buckets=num_buckets, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesSparseAggregateStats", node_ids=node_ids, + gradients=gradients, + hessians=hessians, + feature_indices=feature_indices, + feature_values=feature_values, + feature_shape=feature_shape, + max_splits=max_splits, + num_buckets=num_buckets, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("max_splits", _op._get_attr_int("max_splits"), "num_buckets", + _op._get_attr_int("num_buckets")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesSparseAggregateStats", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSparseAggregateStatsOutput._make(_result) + return _result + +BoostedTreesSparseAggregateStats = tf_export("raw_ops.BoostedTreesSparseAggregateStats")(_ops.to_raw_op(boosted_trees_sparse_aggregate_stats)) + + +def boosted_trees_sparse_aggregate_stats_eager_fallback(node_ids: Annotated[Any, _atypes.Int32], gradients: Annotated[Any, _atypes.Float32], hessians: Annotated[Any, _atypes.Float32], feature_indices: Annotated[Any, _atypes.Int32], feature_values: Annotated[Any, _atypes.Int32], feature_shape: Annotated[Any, _atypes.Int32], max_splits: int, num_buckets: int, name, ctx): + max_splits = _execute.make_int(max_splits, "max_splits") + num_buckets = _execute.make_int(num_buckets, "num_buckets") + node_ids = _ops.convert_to_tensor(node_ids, _dtypes.int32) + gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) + hessians = _ops.convert_to_tensor(hessians, _dtypes.float32) + feature_indices = _ops.convert_to_tensor(feature_indices, _dtypes.int32) + feature_values = _ops.convert_to_tensor(feature_values, _dtypes.int32) + feature_shape = _ops.convert_to_tensor(feature_shape, _dtypes.int32) + _inputs_flat = [node_ids, gradients, hessians, feature_indices, feature_values, feature_shape] + _attrs = ("max_splits", max_splits, "num_buckets", num_buckets) + _result = _execute.execute(b"BoostedTreesSparseAggregateStats", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesSparseAggregateStats", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSparseAggregateStatsOutput._make(_result) + return _result + +_BoostedTreesSparseCalculateBestFeatureSplitOutput = collections.namedtuple( + 
"BoostedTreesSparseCalculateBestFeatureSplit", + ["node_ids", "gains", "feature_dimensions", "thresholds", "left_node_contribs", "right_node_contribs", "split_with_default_directions"]) + + +def boosted_trees_sparse_calculate_best_feature_split(node_id_range: Annotated[Any, _atypes.Int32], stats_summary_indices: Annotated[Any, _atypes.Int32], stats_summary_values: Annotated[Any, _atypes.Float32], stats_summary_shape: Annotated[Any, _atypes.Int32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, split_type:str="inequality", name=None): + r"""Calculates gains for each feature and returns the best possible split information for the feature. + + The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + + It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + + In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + + The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. + + Args: + node_id_range: A `Tensor` of type `int32`. + A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + stats_summary_indices: A `Tensor` of type `int32`. + A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim. + stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used. + stats_summary_values: A `Tensor` of type `float32`. + A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices. + stats_summary_shape: A `Tensor` of type `int32`. + A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim]. + l1: A `Tensor` of type `float32`. + l1 regularization factor on leaf weights, per instance based. + l2: A `Tensor` of type `float32`. + l2 regularization factor on leaf weights, per instance based. + tree_complexity: A `Tensor` of type `float32`. + adjustment to the gain, per leaf based. + min_node_weight: A `Tensor` of type `float32`. + minimum avg of hessians in a node before required for the node to be considered for splitting. + logits_dimension: An `int` that is `>= 1`. + The dimension of logit, i.e., number of classes. + split_type: An optional `string` from: `"inequality"`. Defaults to `"inequality"`. + A string indicating if this Op should perform inequality split or equality split. 
+ name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (node_ids, gains, feature_dimensions, thresholds, left_node_contribs, right_node_contribs, split_with_default_directions). + + node_ids: A `Tensor` of type `int32`. + gains: A `Tensor` of type `float32`. + feature_dimensions: A `Tensor` of type `int32`. + thresholds: A `Tensor` of type `int32`. + left_node_contribs: A `Tensor` of type `float32`. + right_node_contribs: A `Tensor` of type `float32`. + split_with_default_directions: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesSparseCalculateBestFeatureSplit", name, + node_id_range, stats_summary_indices, stats_summary_values, + stats_summary_shape, l1, l2, tree_complexity, min_node_weight, + "logits_dimension", logits_dimension, "split_type", split_type) + _result = _BoostedTreesSparseCalculateBestFeatureSplitOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_sparse_calculate_best_feature_split_eager_fallback( + node_id_range, stats_summary_indices, stats_summary_values, + stats_summary_shape, l1, l2, tree_complexity, min_node_weight, + logits_dimension=logits_dimension, split_type=split_type, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + if split_type is None: + split_type = "inequality" + split_type = _execute.make_str(split_type, "split_type") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesSparseCalculateBestFeatureSplit", node_id_range=node_id_range, + stats_summary_indices=stats_summary_indices, + stats_summary_values=stats_summary_values, + stats_summary_shape=stats_summary_shape, + l1=l1, l2=l2, + tree_complexity=tree_complexity, + min_node_weight=min_node_weight, + logits_dimension=logits_dimension, + split_type=split_type, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("logits_dimension", _op._get_attr_int("logits_dimension"), + "split_type", _op.get_attr("split_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesSparseCalculateBestFeatureSplit", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSparseCalculateBestFeatureSplitOutput._make(_result) + return _result + +BoostedTreesSparseCalculateBestFeatureSplit = tf_export("raw_ops.BoostedTreesSparseCalculateBestFeatureSplit")(_ops.to_raw_op(boosted_trees_sparse_calculate_best_feature_split)) + + +def boosted_trees_sparse_calculate_best_feature_split_eager_fallback(node_id_range: Annotated[Any, _atypes.Int32], stats_summary_indices: Annotated[Any, _atypes.Int32], stats_summary_values: Annotated[Any, _atypes.Float32], stats_summary_shape: Annotated[Any, _atypes.Int32], l1: Annotated[Any, _atypes.Float32], l2: Annotated[Any, _atypes.Float32], tree_complexity: Annotated[Any, _atypes.Float32], min_node_weight: Annotated[Any, _atypes.Float32], logits_dimension: int, split_type: str, name, ctx): + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + if split_type is None: + split_type = "inequality" + split_type = _execute.make_str(split_type, "split_type") + node_id_range = 
_ops.convert_to_tensor(node_id_range, _dtypes.int32) + stats_summary_indices = _ops.convert_to_tensor(stats_summary_indices, _dtypes.int32) + stats_summary_values = _ops.convert_to_tensor(stats_summary_values, _dtypes.float32) + stats_summary_shape = _ops.convert_to_tensor(stats_summary_shape, _dtypes.int32) + l1 = _ops.convert_to_tensor(l1, _dtypes.float32) + l2 = _ops.convert_to_tensor(l2, _dtypes.float32) + tree_complexity = _ops.convert_to_tensor(tree_complexity, _dtypes.float32) + min_node_weight = _ops.convert_to_tensor(min_node_weight, _dtypes.float32) + _inputs_flat = [node_id_range, stats_summary_indices, stats_summary_values, stats_summary_shape, l1, l2, tree_complexity, min_node_weight] + _attrs = ("logits_dimension", logits_dimension, "split_type", split_type) + _result = _execute.execute(b"BoostedTreesSparseCalculateBestFeatureSplit", + 7, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesSparseCalculateBestFeatureSplit", _inputs_flat, _attrs, _result) + _result = _BoostedTreesSparseCalculateBestFeatureSplitOutput._make(_result) + return _result + +_BoostedTreesTrainingPredictOutput = collections.namedtuple( + "BoostedTreesTrainingPredict", + ["partial_logits", "tree_ids", "node_ids"]) + + +def boosted_trees_training_predict(tree_ensemble_handle: Annotated[Any, _atypes.Resource], cached_tree_ids: Annotated[Any, _atypes.Int32], cached_node_ids: Annotated[Any, _atypes.Int32], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name=None): + r"""Runs multiple additive regression ensemble predictors on input instances and + + computes the update to cached logits. It is designed to be used during training. + It traverses the trees starting from cached tree id and cached node id and + calculates the updates to be pushed to the cache. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + cached_tree_ids: A `Tensor` of type `int32`. + Rank 1 Tensor containing cached tree ids which is the starting + tree of prediction. + cached_node_ids: A `Tensor` of type `int32`. + Rank 1 Tensor containing cached node id which is the starting + node of prediction. + bucketized_features: A list of at least 1 `Tensor` objects with type `int32`. + A list of rank 1 Tensors containing bucket id for each + feature. + logits_dimension: An `int`. + scalar, dimension of the logits, to be used for partial logits + shape. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (partial_logits, tree_ids, node_ids). + + partial_logits: A `Tensor` of type `float32`. + tree_ids: A `Tensor` of type `int32`. + node_ids: A `Tensor` of type `int32`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesTrainingPredict", name, tree_ensemble_handle, + cached_tree_ids, cached_node_ids, bucketized_features, + "logits_dimension", logits_dimension) + _result = _BoostedTreesTrainingPredictOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_training_predict_eager_fallback( + tree_ensemble_handle, cached_tree_ids, cached_node_ids, + bucketized_features, logits_dimension=logits_dimension, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ # Add nodes to the TensorFlow graph. + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_training_predict' Op, not %r." % bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesTrainingPredict", tree_ensemble_handle=tree_ensemble_handle, + cached_tree_ids=cached_tree_ids, + cached_node_ids=cached_node_ids, + bucketized_features=bucketized_features, + logits_dimension=logits_dimension, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("num_bucketized_features", + _op._get_attr_int("num_bucketized_features"), + "logits_dimension", _op._get_attr_int("logits_dimension")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "BoostedTreesTrainingPredict", _inputs_flat, _attrs, _result) + _result = _BoostedTreesTrainingPredictOutput._make(_result) + return _result + +BoostedTreesTrainingPredict = tf_export("raw_ops.BoostedTreesTrainingPredict")(_ops.to_raw_op(boosted_trees_training_predict)) + + +def boosted_trees_training_predict_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], cached_tree_ids: Annotated[Any, _atypes.Int32], cached_node_ids: Annotated[Any, _atypes.Int32], bucketized_features: Annotated[List[Any], _atypes.Int32], logits_dimension: int, name, ctx): + if not isinstance(bucketized_features, (list, tuple)): + raise TypeError( + "Expected list for 'bucketized_features' argument to " + "'boosted_trees_training_predict' Op, not %r." % bucketized_features) + _attr_num_bucketized_features = len(bucketized_features) + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + cached_tree_ids = _ops.convert_to_tensor(cached_tree_ids, _dtypes.int32) + cached_node_ids = _ops.convert_to_tensor(cached_node_ids, _dtypes.int32) + bucketized_features = _ops.convert_n_to_tensor(bucketized_features, _dtypes.int32) + _inputs_flat = [tree_ensemble_handle, cached_tree_ids, cached_node_ids] + list(bucketized_features) + _attrs = ("num_bucketized_features", _attr_num_bucketized_features, + "logits_dimension", logits_dimension) + _result = _execute.execute(b"BoostedTreesTrainingPredict", 3, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "BoostedTreesTrainingPredict", _inputs_flat, _attrs, _result) + _result = _BoostedTreesTrainingPredictOutput._make(_result) + return _result + + +def boosted_trees_update_ensemble(tree_ensemble_handle: Annotated[Any, _atypes.Resource], feature_ids: Annotated[Any, _atypes.Int32], node_ids: Annotated[List[Any], _atypes.Int32], gains: Annotated[List[Any], _atypes.Float32], thresholds: Annotated[List[Any], _atypes.Int32], left_node_contribs: Annotated[List[Any], _atypes.Float32], right_node_contribs: Annotated[List[Any], _atypes.Float32], max_depth: Annotated[Any, _atypes.Int32], learning_rate: Annotated[Any, _atypes.Float32], pruning_mode: int, name=None): + r"""Updates the tree ensemble by either adding a layer to the last tree being grown + + or by starting a new tree. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the ensemble variable. + feature_ids: A `Tensor` of type `int32`. + Rank 1 tensor with ids for each feature. 
This is the real id of + the feature that will be used in the split. + node_ids: A list of `Tensor` objects with type `int32`. + List of rank 1 tensors representing the nodes for which this feature + has a split. + gains: A list with the same length as `node_ids` of `Tensor` objects with type `float32`. + List of rank 1 tensors representing the gains for each of the feature's + split. + thresholds: A list with the same length as `node_ids` of `Tensor` objects with type `int32`. + List of rank 1 tensors representing the thesholds for each of the + feature's split. + left_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`. + List of rank 2 tensors with left leaf contribs for each of + the feature's splits. Will be added to the previous node values to constitute + the values of the left nodes. + right_node_contribs: A list with the same length as `node_ids` of `Tensor` objects with type `float32`. + List of rank 2 tensors with right leaf contribs for each + of the feature's splits. Will be added to the previous node values to constitute + the values of the right nodes. + max_depth: A `Tensor` of type `int32`. Max depth of the tree to build. + learning_rate: A `Tensor` of type `float32`. + shrinkage const for each new tree. + pruning_mode: An `int` that is `>= 0`. + 0-No pruning, 1-Pre-pruning, 2-Post-pruning. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesUpdateEnsemble", name, tree_ensemble_handle, + feature_ids, node_ids, gains, thresholds, left_node_contribs, + right_node_contribs, max_depth, learning_rate, "pruning_mode", + pruning_mode) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_update_ensemble_eager_fallback( + tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, + left_node_contribs, right_node_contribs, max_depth, learning_rate, + pruning_mode=pruning_mode, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(node_ids, (list, tuple)): + raise TypeError( + "Expected list for 'node_ids' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % node_ids) + _attr_num_features = len(node_ids) + if not isinstance(gains, (list, tuple)): + raise TypeError( + "Expected list for 'gains' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % gains) + if len(gains) != _attr_num_features: + raise ValueError( + "List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(gains), _attr_num_features)) + if not isinstance(thresholds, (list, tuple)): + raise TypeError( + "Expected list for 'thresholds' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % thresholds) + if len(thresholds) != _attr_num_features: + raise ValueError( + "List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(thresholds), _attr_num_features)) + if not isinstance(left_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'left_node_contribs' argument to " + "'boosted_trees_update_ensemble' Op, not %r." 
% left_node_contribs) + if len(left_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(left_node_contribs), _attr_num_features)) + if not isinstance(right_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'right_node_contribs' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs) + if len(right_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(right_node_contribs), _attr_num_features)) + pruning_mode = _execute.make_int(pruning_mode, "pruning_mode") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesUpdateEnsemble", tree_ensemble_handle=tree_ensemble_handle, + feature_ids=feature_ids, + node_ids=node_ids, gains=gains, + thresholds=thresholds, + left_node_contribs=left_node_contribs, + right_node_contribs=right_node_contribs, + max_depth=max_depth, + learning_rate=learning_rate, + pruning_mode=pruning_mode, name=name) + return _op +BoostedTreesUpdateEnsemble = tf_export("raw_ops.BoostedTreesUpdateEnsemble")(_ops.to_raw_op(boosted_trees_update_ensemble)) + + +def boosted_trees_update_ensemble_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], feature_ids: Annotated[Any, _atypes.Int32], node_ids: Annotated[List[Any], _atypes.Int32], gains: Annotated[List[Any], _atypes.Float32], thresholds: Annotated[List[Any], _atypes.Int32], left_node_contribs: Annotated[List[Any], _atypes.Float32], right_node_contribs: Annotated[List[Any], _atypes.Float32], max_depth: Annotated[Any, _atypes.Int32], learning_rate: Annotated[Any, _atypes.Float32], pruning_mode: int, name, ctx): + if not isinstance(node_ids, (list, tuple)): + raise TypeError( + "Expected list for 'node_ids' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % node_ids) + _attr_num_features = len(node_ids) + if not isinstance(gains, (list, tuple)): + raise TypeError( + "Expected list for 'gains' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % gains) + if len(gains) != _attr_num_features: + raise ValueError( + "List argument 'gains' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(gains), _attr_num_features)) + if not isinstance(thresholds, (list, tuple)): + raise TypeError( + "Expected list for 'thresholds' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % thresholds) + if len(thresholds) != _attr_num_features: + raise ValueError( + "List argument 'thresholds' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(thresholds), _attr_num_features)) + if not isinstance(left_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'left_node_contribs' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % left_node_contribs) + if len(left_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'left_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." 
% + (len(left_node_contribs), _attr_num_features)) + if not isinstance(right_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'right_node_contribs' argument to " + "'boosted_trees_update_ensemble' Op, not %r." % right_node_contribs) + if len(right_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'right_node_contribs' to 'boosted_trees_update_ensemble' Op with length %d " + "must match length %d of argument 'node_ids'." % + (len(right_node_contribs), _attr_num_features)) + pruning_mode = _execute.make_int(pruning_mode, "pruning_mode") + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + feature_ids = _ops.convert_to_tensor(feature_ids, _dtypes.int32) + node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32) + gains = _ops.convert_n_to_tensor(gains, _dtypes.float32) + thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32) + left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32) + right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32) + max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32) + learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32) + _inputs_flat = [tree_ensemble_handle, feature_ids] + list(node_ids) + list(gains) + list(thresholds) + list(left_node_contribs) + list(right_node_contribs) + [max_depth, learning_rate] + _attrs = ("pruning_mode", pruning_mode, "num_features", _attr_num_features) + _result = _execute.execute(b"BoostedTreesUpdateEnsemble", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def boosted_trees_update_ensemble_v2(tree_ensemble_handle: Annotated[Any, _atypes.Resource], feature_ids: Annotated[List[Any], _atypes.Int32], dimension_ids: Annotated[List[Any], _atypes.Int32], node_ids: Annotated[List[Any], _atypes.Int32], gains: Annotated[List[Any], _atypes.Float32], thresholds: Annotated[List[Any], _atypes.Int32], left_node_contribs: Annotated[List[Any], _atypes.Float32], right_node_contribs: Annotated[List[Any], _atypes.Float32], split_types: Annotated[List[Any], _atypes.String], max_depth: Annotated[Any, _atypes.Int32], learning_rate: Annotated[Any, _atypes.Float32], pruning_mode: Annotated[Any, _atypes.Int32], logits_dimension:int=1, name=None): + r"""Updates the tree ensemble by adding a layer to the last tree being grown + + or by starting a new tree. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the ensemble variable. + feature_ids: A list of at least 1 `Tensor` objects with type `int32`. + Rank 1 tensor with ids for each feature. This is the real id of + the feature that will be used in the split. + dimension_ids: A list of `Tensor` objects with type `int32`. + List of rank 1 tensors representing the dimension in each feature. + node_ids: A list with the same length as `dimension_ids` of `Tensor` objects with type `int32`. + List of rank 1 tensors representing the nodes for which this feature + has a split. + gains: A list with the same length as `dimension_ids` of `Tensor` objects with type `float32`. + List of rank 1 tensors representing the gains for each of the feature's + split. + thresholds: A list with the same length as `dimension_ids` of `Tensor` objects with type `int32`. + List of rank 1 tensors representing the thesholds for each of the + feature's split. + left_node_contribs: A list with the same length as `dimension_ids` of `Tensor` objects with type `float32`. 
+ List of rank 2 tensors with left leaf contribs for each of + the feature's splits. Will be added to the previous node values to constitute + the values of the left nodes. + right_node_contribs: A list with the same length as `dimension_ids` of `Tensor` objects with type `float32`. + List of rank 2 tensors with right leaf contribs for each + of the feature's splits. Will be added to the previous node values to constitute + the values of the right nodes. + split_types: A list with the same length as `dimension_ids` of `Tensor` objects with type `string`. + List of rank 1 tensors representing the split type for each feature. + max_depth: A `Tensor` of type `int32`. Max depth of the tree to build. + learning_rate: A `Tensor` of type `float32`. + shrinkage const for each new tree. + pruning_mode: A `Tensor` of type `int32`. + 0-No pruning, 1-Pre-pruning, 2-Post-pruning. + logits_dimension: An optional `int`. Defaults to `1`. + scalar, dimension of the logits + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "BoostedTreesUpdateEnsembleV2", name, tree_ensemble_handle, + feature_ids, dimension_ids, node_ids, gains, thresholds, + left_node_contribs, right_node_contribs, split_types, max_depth, + learning_rate, pruning_mode, "logits_dimension", logits_dimension) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return boosted_trees_update_ensemble_v2_eager_fallback( + tree_ensemble_handle, feature_ids, dimension_ids, node_ids, gains, + thresholds, left_node_contribs, right_node_contribs, split_types, + max_depth, learning_rate, pruning_mode, + logits_dimension=logits_dimension, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. + if not isinstance(dimension_ids, (list, tuple)): + raise TypeError( + "Expected list for 'dimension_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % dimension_ids) + _attr_num_features = len(dimension_ids) + if not isinstance(node_ids, (list, tuple)): + raise TypeError( + "Expected list for 'node_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % node_ids) + if len(node_ids) != _attr_num_features: + raise ValueError( + "List argument 'node_ids' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(node_ids), _attr_num_features)) + if not isinstance(gains, (list, tuple)): + raise TypeError( + "Expected list for 'gains' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % gains) + if len(gains) != _attr_num_features: + raise ValueError( + "List argument 'gains' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(gains), _attr_num_features)) + if not isinstance(thresholds, (list, tuple)): + raise TypeError( + "Expected list for 'thresholds' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % thresholds) + if len(thresholds) != _attr_num_features: + raise ValueError( + "List argument 'thresholds' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." 
% + (len(thresholds), _attr_num_features)) + if not isinstance(left_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'left_node_contribs' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % left_node_contribs) + if len(left_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'left_node_contribs' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(left_node_contribs), _attr_num_features)) + if not isinstance(right_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'right_node_contribs' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % right_node_contribs) + if len(right_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'right_node_contribs' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(right_node_contribs), _attr_num_features)) + if not isinstance(split_types, (list, tuple)): + raise TypeError( + "Expected list for 'split_types' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % split_types) + if len(split_types) != _attr_num_features: + raise ValueError( + "List argument 'split_types' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(split_types), _attr_num_features)) + if not isinstance(feature_ids, (list, tuple)): + raise TypeError( + "Expected list for 'feature_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % feature_ids) + _attr_num_groups = len(feature_ids) + if logits_dimension is None: + logits_dimension = 1 + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "BoostedTreesUpdateEnsembleV2", tree_ensemble_handle=tree_ensemble_handle, + feature_ids=feature_ids, + dimension_ids=dimension_ids, + node_ids=node_ids, gains=gains, + thresholds=thresholds, + left_node_contribs=left_node_contribs, + right_node_contribs=right_node_contribs, + split_types=split_types, + max_depth=max_depth, + learning_rate=learning_rate, + pruning_mode=pruning_mode, + logits_dimension=logits_dimension, + name=name) + return _op +BoostedTreesUpdateEnsembleV2 = tf_export("raw_ops.BoostedTreesUpdateEnsembleV2")(_ops.to_raw_op(boosted_trees_update_ensemble_v2)) + + +def boosted_trees_update_ensemble_v2_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], feature_ids: Annotated[List[Any], _atypes.Int32], dimension_ids: Annotated[List[Any], _atypes.Int32], node_ids: Annotated[List[Any], _atypes.Int32], gains: Annotated[List[Any], _atypes.Float32], thresholds: Annotated[List[Any], _atypes.Int32], left_node_contribs: Annotated[List[Any], _atypes.Float32], right_node_contribs: Annotated[List[Any], _atypes.Float32], split_types: Annotated[List[Any], _atypes.String], max_depth: Annotated[Any, _atypes.Int32], learning_rate: Annotated[Any, _atypes.Float32], pruning_mode: Annotated[Any, _atypes.Int32], logits_dimension: int, name, ctx): + if not isinstance(dimension_ids, (list, tuple)): + raise TypeError( + "Expected list for 'dimension_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % dimension_ids) + _attr_num_features = len(dimension_ids) + if not isinstance(node_ids, (list, tuple)): + raise TypeError( + "Expected list for 'node_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." 
% node_ids) + if len(node_ids) != _attr_num_features: + raise ValueError( + "List argument 'node_ids' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(node_ids), _attr_num_features)) + if not isinstance(gains, (list, tuple)): + raise TypeError( + "Expected list for 'gains' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % gains) + if len(gains) != _attr_num_features: + raise ValueError( + "List argument 'gains' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(gains), _attr_num_features)) + if not isinstance(thresholds, (list, tuple)): + raise TypeError( + "Expected list for 'thresholds' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % thresholds) + if len(thresholds) != _attr_num_features: + raise ValueError( + "List argument 'thresholds' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(thresholds), _attr_num_features)) + if not isinstance(left_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'left_node_contribs' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % left_node_contribs) + if len(left_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'left_node_contribs' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(left_node_contribs), _attr_num_features)) + if not isinstance(right_node_contribs, (list, tuple)): + raise TypeError( + "Expected list for 'right_node_contribs' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % right_node_contribs) + if len(right_node_contribs) != _attr_num_features: + raise ValueError( + "List argument 'right_node_contribs' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(right_node_contribs), _attr_num_features)) + if not isinstance(split_types, (list, tuple)): + raise TypeError( + "Expected list for 'split_types' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." % split_types) + if len(split_types) != _attr_num_features: + raise ValueError( + "List argument 'split_types' to 'boosted_trees_update_ensemble_v2' Op with length %d " + "must match length %d of argument 'dimension_ids'." % + (len(split_types), _attr_num_features)) + if not isinstance(feature_ids, (list, tuple)): + raise TypeError( + "Expected list for 'feature_ids' argument to " + "'boosted_trees_update_ensemble_v2' Op, not %r." 
% feature_ids) + _attr_num_groups = len(feature_ids) + if logits_dimension is None: + logits_dimension = 1 + logits_dimension = _execute.make_int(logits_dimension, "logits_dimension") + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + feature_ids = _ops.convert_n_to_tensor(feature_ids, _dtypes.int32) + dimension_ids = _ops.convert_n_to_tensor(dimension_ids, _dtypes.int32) + node_ids = _ops.convert_n_to_tensor(node_ids, _dtypes.int32) + gains = _ops.convert_n_to_tensor(gains, _dtypes.float32) + thresholds = _ops.convert_n_to_tensor(thresholds, _dtypes.int32) + left_node_contribs = _ops.convert_n_to_tensor(left_node_contribs, _dtypes.float32) + right_node_contribs = _ops.convert_n_to_tensor(right_node_contribs, _dtypes.float32) + split_types = _ops.convert_n_to_tensor(split_types, _dtypes.string) + max_depth = _ops.convert_to_tensor(max_depth, _dtypes.int32) + learning_rate = _ops.convert_to_tensor(learning_rate, _dtypes.float32) + pruning_mode = _ops.convert_to_tensor(pruning_mode, _dtypes.int32) + _inputs_flat = [tree_ensemble_handle] + list(feature_ids) + list(dimension_ids) + list(node_ids) + list(gains) + list(thresholds) + list(left_node_contribs) + list(right_node_contribs) + list(split_types) + [max_depth, learning_rate, pruning_mode] + _attrs = ("num_features", _attr_num_features, "logits_dimension", + logits_dimension, "num_groups", _attr_num_groups) + _result = _execute.execute(b"BoostedTreesUpdateEnsembleV2", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + + +def is_boosted_trees_ensemble_initialized(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Checks whether a tree ensemble has been initialized. + + Args: + tree_ensemble_handle: A `Tensor` of type `resource`. + Handle to the tree ensemble resource. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsBoostedTreesEnsembleInitialized", name, tree_ensemble_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return is_boosted_trees_ensemble_initialized_eager_fallback( + tree_ensemble_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsBoostedTreesEnsembleInitialized", tree_ensemble_handle=tree_ensemble_handle, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsBoostedTreesEnsembleInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsBoostedTreesEnsembleInitialized = tf_export("raw_ops.IsBoostedTreesEnsembleInitialized")(_ops.to_raw_op(is_boosted_trees_ensemble_initialized)) + + +def is_boosted_trees_ensemble_initialized_eager_fallback(tree_ensemble_handle: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.Bool]: + tree_ensemble_handle = _ops.convert_to_tensor(tree_ensemble_handle, _dtypes.resource) + _inputs_flat = [tree_ensemble_handle] + _attrs = None + _result = _execute.execute(b"IsBoostedTreesEnsembleInitialized", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsBoostedTreesEnsembleInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +def is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], name=None) -> Annotated[Any, _atypes.Bool]: + r"""Checks whether a quantile stream has been initialized. + + An Op that checks if quantile stream resource is initialized. + + Args: + quantile_stream_resource_handle: A `Tensor` of type `resource`. + resource; The reference to quantile stream resource handle. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bool`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "IsBoostedTreesQuantileStreamResourceInitialized", name, + quantile_stream_resource_handle) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + return is_boosted_trees_quantile_stream_resource_initialized_eager_fallback( + quantile_stream_resource_handle, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + # Add nodes to the TensorFlow graph. 
+ _, _, _op, _outputs = _op_def_library._apply_op_helper( + "IsBoostedTreesQuantileStreamResourceInitialized", quantile_stream_resource_handle=quantile_stream_resource_handle, + name=name) + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "IsBoostedTreesQuantileStreamResourceInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +IsBoostedTreesQuantileStreamResourceInitialized = tf_export("raw_ops.IsBoostedTreesQuantileStreamResourceInitialized")(_ops.to_raw_op(is_boosted_trees_quantile_stream_resource_initialized)) + + +def is_boosted_trees_quantile_stream_resource_initialized_eager_fallback(quantile_stream_resource_handle: Annotated[Any, _atypes.Resource], name, ctx) -> Annotated[Any, _atypes.Bool]: + quantile_stream_resource_handle = _ops.convert_to_tensor(quantile_stream_resource_handle, _dtypes.resource) + _inputs_flat = [quantile_stream_resource_handle] + _attrs = None + _result = _execute.execute(b"IsBoostedTreesQuantileStreamResourceInitialized", + 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "IsBoostedTreesQuantileStreamResourceInitialized", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_decode_proto_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_decode_proto_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2b54f765f78083220fcb027d4395bf87e80c006e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/gen_decode_proto_ops.py @@ -0,0 +1,319 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated +_DecodeProtoV2Output = collections.namedtuple( + "DecodeProtoV2", + ["sizes", "values"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('io.decode_proto') +def decode_proto_v2(bytes: Annotated[Any, _atypes.String], message_type: str, field_names, output_types, descriptor_source:str="local://", message_format:str="binary", sanitize:bool=False, name=None): + r"""The op extracts fields from a serialized protocol buffers message into tensors. + + Note: This API is designed for orthogonality rather than human-friendliness. It + can be used to parse input protos by hand, but it is intended for use in + generated code. + + The `decode_proto` op extracts fields from a serialized protocol buffers + message into tensors. 
The fields in `field_names` are decoded and converted + to the corresponding `output_types` if possible. + + A `message_type` name must be provided to give context for the field names. + The actual message descriptor can be looked up either in the linked-in + descriptor pool or a filename provided by the caller using the + `descriptor_source` attribute. + + Each output tensor is a dense tensor. This means that it is padded to hold + the largest number of repeated elements seen in the input minibatch. (The + shape is also padded by one to prevent zero-sized dimensions). The actual + repeat counts for each example in the minibatch can be found in the `sizes` + output. In many cases the output of `decode_proto` is fed immediately into + tf.squeeze if missing values are not a concern. When using tf.squeeze, always + pass the squeeze dimension explicitly to avoid surprises. + + For the most part, the mapping between Proto field types and TensorFlow dtypes + is straightforward. However, there are a few special cases: + + - A proto field that contains a submessage or group can only be converted + to `DT_STRING` (the serialized submessage). This is to reduce the complexity + of the API. The resulting string can be used as input to another instance of + the decode_proto op. + + - TensorFlow lacks support for unsigned integers. The ops represent uint64 + types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + way). Unsigned int32 values can be represented exactly by specifying type + `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + the `output_types` attribute. + + - `map` fields are not directly decoded. They are treated as `repeated` fields, + of the appropriate entry type. The proto-compiler defines entry types for each + map field. The type-name is the field name, converted to "CamelCase" with + "Entry" appended. The `tf.train.Features.FeatureEntry` message is an example of + one of these implicit `Entry` types. + + - `enum` fields should be read as int32. + + Both binary and text proto serializations are supported, and can be + chosen using the `format` attribute. + + The `descriptor_source` attribute selects the source of protocol + descriptors to consult when looking up `message_type`. This may be: + + - An empty string or "local://", in which case protocol descriptors are + created for C++ (not Python) proto definitions linked to the binary. + + - A file, in which case protocol descriptors are created from the file, + which is expected to contain a `FileDescriptorSet` serialized as a string. + NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + and `--include_imports` options to the protocol compiler `protoc`. + + - A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`, + which is expected to be a `FileDescriptorSet` serialized as a string. + + Here is an example: + + The, internal, `Summary.Value` proto contains a + `oneof {float simple_value; Image image; ...}` + + >>> from google.protobuf import text_format + >>> + >>> # A Summary.Value contains: oneof {float simple_value; Image image} + >>> values = [ + ... "simple_value: 2.2", + ... "simple_value: 1.2", + ... "image { height: 128 width: 512 }", + ... "image { height: 256 width: 256 }",] + >>> values = [ + ... text_format.Parse(v, tf.compat.v1.Summary.Value()).SerializeToString() + ... for v in values] + + The following can decode both fields from the serialized strings: + + >>> sizes, [simple_value, image] = tf.io.decode_proto( + ... values, + ...
tf.compat.v1.Summary.Value.DESCRIPTOR.full_name, + ... field_names=['simple_value', 'image'], + ... output_types=[tf.float32, tf.string]) + + The `sizes` has the same shape as the input, with an additional axis across the + fields that were decoded. Here the first column of `sizes` is the size of the + decoded `simple_value` field: + + >>> print(sizes) + tf.Tensor( + [[1 0] + [1 0] + [0 1] + [0 1]], shape=(4, 2), dtype=int32) + + The result tensors each have one more index than the input byte-strings. + The valid elements of each result tensor are indicated by + the appropriate column of `sizes`. The invalid elements are padded with a + default value: + + >>> print(simple_value) + tf.Tensor( + [[2.2] + [1.2] + [0. ] + [0. ]], shape=(4, 1), dtype=float32) + + Nested protos are extracted as string tensors: + + >>> print(image.dtype) + <dtype: 'string'> + >>> print(image.shape.as_list()) + [4, 1] + + To convert to a `tf.RaggedTensor` representation use: + + >>> tf.RaggedTensor.from_tensor(simple_value, lengths=sizes[:, 0]).to_list() + [[2.2], [1.2], [], []] + + Args: + bytes: A `Tensor` of type `string`. + Tensor of serialized protos with shape `batch_shape`. + message_type: A `string`. Name of the proto message type to decode. + field_names: A list of `strings`. + List of strings containing proto field names. An extension field can be decoded + by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. + output_types: A list of `tf.DTypes`. + List of TF types to use for the respective field in field_names. + descriptor_source: An optional `string`. Defaults to `"local://"`. + Either the special value `local://` or a path to a file containing + a serialized `FileDescriptorSet`. + message_format: An optional `string`. Defaults to `"binary"`. + Either `binary` or `text`. + sanitize: An optional `bool`. Defaults to `False`. + Whether to sanitize the result or not. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (sizes, values). + + sizes: A `Tensor` of type `int32`. + values: A list of `Tensor` objects of type `output_types`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DecodeProtoV2", name, bytes, "message_type", message_type, + "field_names", field_names, "output_types", output_types, + "descriptor_source", descriptor_source, "message_format", + message_format, "sanitize", sanitize) + _result = _DecodeProtoV2Output._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_decode_proto_v2( + (bytes, message_type, field_names, output_types, descriptor_source, + message_format, sanitize, name,), None) + if _result is not NotImplemented: + return _result + return decode_proto_v2_eager_fallback( + bytes, message_type=message_type, field_names=field_names, + output_types=output_types, descriptor_source=descriptor_source, + message_format=message_format, sanitize=sanitize, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_proto_v2, (), dict(bytes=bytes, message_type=message_type, + field_names=field_names, + output_types=output_types, + descriptor_source=descriptor_source, + message_format=message_format, + sanitize=sanitize, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_decode_proto_v2( + (bytes, message_type, field_names, output_types, descriptor_source, + message_format, sanitize, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + message_type = _execute.make_str(message_type, "message_type") + if not isinstance(field_names, (list, tuple)): + raise TypeError( + "Expected list for 'field_names' argument to " + "'decode_proto_v2' Op, not %r." % field_names) + field_names = [_execute.make_str(_s, "field_names") for _s in field_names] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'decode_proto_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if descriptor_source is None: + descriptor_source = "local://" + descriptor_source = _execute.make_str(descriptor_source, "descriptor_source") + if message_format is None: + message_format = "binary" + message_format = _execute.make_str(message_format, "message_format") + if sanitize is None: + sanitize = False + sanitize = _execute.make_bool(sanitize, "sanitize") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DecodeProtoV2", bytes=bytes, message_type=message_type, + field_names=field_names, output_types=output_types, + descriptor_source=descriptor_source, + message_format=message_format, sanitize=sanitize, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + decode_proto_v2, (), dict(bytes=bytes, message_type=message_type, + field_names=field_names, + output_types=output_types, + descriptor_source=descriptor_source, + message_format=message_format, + sanitize=sanitize, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("message_type", _op.get_attr("message_type"), "field_names", + _op.get_attr("field_names"), "output_types", + _op.get_attr("output_types"), "descriptor_source", + _op.get_attr("descriptor_source"), "message_format", + _op.get_attr("message_format"), "sanitize", + _op._get_attr_bool("sanitize")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "DecodeProtoV2", _inputs_flat, _attrs, _result) + _result = _result[:1] + [_result[1:]] + _result = _DecodeProtoV2Output._make(_result) + return _result + +DecodeProtoV2 = tf_export("raw_ops.DecodeProtoV2")(_ops.to_raw_op(decode_proto_v2)) +_dispatcher_for_decode_proto_v2 = decode_proto_v2._tf_type_based_dispatcher.Dispatch + + +def decode_proto_v2_eager_fallback(bytes: Annotated[Any, _atypes.String], message_type: str, field_names, output_types, descriptor_source: str, message_format: str, sanitize: bool, name, ctx): + message_type = _execute.make_str(message_type, "message_type") + if not isinstance(field_names, (list, tuple)): + raise TypeError( + "Expected list for 'field_names' argument to " + "'decode_proto_v2' Op, not %r." 
% field_names) + field_names = [_execute.make_str(_s, "field_names") for _s in field_names] + if not isinstance(output_types, (list, tuple)): + raise TypeError( + "Expected list for 'output_types' argument to " + "'decode_proto_v2' Op, not %r." % output_types) + output_types = [_execute.make_type(_t, "output_types") for _t in output_types] + if descriptor_source is None: + descriptor_source = "local://" + descriptor_source = _execute.make_str(descriptor_source, "descriptor_source") + if message_format is None: + message_format = "binary" + message_format = _execute.make_str(message_format, "message_format") + if sanitize is None: + sanitize = False + sanitize = _execute.make_bool(sanitize, "sanitize") + bytes = _ops.convert_to_tensor(bytes, _dtypes.string) + _inputs_flat = [bytes] + _attrs = ("message_type", message_type, "field_names", field_names, + "output_types", output_types, "descriptor_source", descriptor_source, + "message_format", message_format, "sanitize", sanitize) + _result = _execute.execute(b"DecodeProtoV2", len(output_types) + 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "DecodeProtoV2", _inputs_flat, _attrs, _result) + _result = _result[:1] + [_result[1:]] + _result = _DecodeProtoV2Output._make(_result) + return _result +
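A minimal usage sketch for the wrapper above, restating the doctest from the `decode_proto_v2` docstring as a standalone script; it is not part of the generated file. It assumes TensorFlow 2.x running eagerly, and the `texts` list and variable names are illustrative only:

import tensorflow as tf
from google.protobuf import text_format

# Serialize a few Summary.Value protos; each one sets one arm of the
# `oneof {float simple_value; Image image; ...}` field.
texts = [
    "simple_value: 2.2",
    "image { height: 128 width: 512 }",
]
values = [
    text_format.Parse(t, tf.compat.v1.Summary.Value()).SerializeToString()
    for t in texts
]

# Decode both fields in one call through the public alias of DecodeProtoV2.
# `sizes` has one column per requested field and records how many values of
# that field each input proto carried; the value tensors are densely padded.
sizes, [simple_value, image] = tf.io.decode_proto(
    values,
    tf.compat.v1.Summary.Value.DESCRIPTOR.full_name,
    field_names=["simple_value", "image"],
    output_types=[tf.float32, tf.string],
)

print(sizes.numpy())         # expected [[1 0] [0 1]] for the two inputs above
print(simple_value.numpy())  # dense float32 tensor, zero-padded where absent
print(image.dtype)           # nested/submessage fields come back as serialized strings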