Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/converter_flags_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/model_flags_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/types_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/converter_flags_pb2.py +39 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__pycache__/debug_options_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/debug_options_pb2.py +25 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__pycache__/converter_error_data_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/converter_error_data_pb2.py +37 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/model_flags_pb2.py +40 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__pycache__/wrap_converter.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.pyi +21 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.so +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/wrap_converter.py +92 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/types_pb2.py +25 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_config_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_options_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config_pb2.py +86 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/quantization_options_pb2.py +42 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/exported_model_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/quantization_options_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/calibration_algorithm.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/calibration_statistics_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py +395 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_pb2.py +38 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/exported_model_pb2.py +33 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.pyi +72 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py +926 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py +402 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/save_model.py +346 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/quantization_options_pb2.py +56 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/stablehlo/__init__.py +0 -0
.gitattributes
CHANGED
|
@@ -204,3 +204,5 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/pyth
|
|
| 204 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 205 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so filter=lfs diff=lfs merge=lfs -text
|
| 206 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 204 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 205 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so filter=lfs diff=lfs merge=lfs -text
|
| 206 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so filter=lfs diff=lfs merge=lfs -text
|
| 207 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/_xla_ops.so filter=lfs diff=lfs merge=lfs -text
|
| 208 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/stablehlo/stablehlo_extension.so filter=lfs diff=lfs merge=lfs -text
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (205 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/converter_flags_pb2.cpython-310.pyc
ADDED
|
Binary file (4.83 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/model_flags_pb2.cpython-310.pyc
ADDED
|
Binary file (2.69 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/__pycache__/types_pb2.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/converter_flags_pb2.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/compiler/mlir/lite/converter_flags.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
from tensorflow.compiler.mlir.lite.debug import debug_options_pb2 as tensorflow_dot_compiler_dot_mlir_dot_lite_dot_debug_dot_debug__options__pb2
|
| 15 |
+
from tensorflow.compiler.mlir.lite import types_pb2 as tensorflow_dot_compiler_dot_mlir_dot_lite_dot_types__pb2
|
| 16 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as tensorflow_dot_compiler_dot_mlir_dot_quantization_dot_stablehlo_dot_quantization__config__pb2
|
| 17 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_options_pb2 as tensorflow_dot_compiler_dot_mlir_dot_quantization_dot_stablehlo_dot_quantization__options__pb2
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n3tensorflow/compiler/mlir/lite/converter_flags.proto\x12\x06tflite\x1a\x37tensorflow/compiler/mlir/lite/debug/debug_options.proto\x1a)tensorflow/compiler/mlir/lite/types.proto\x1aItensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto\x1aJtensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto\"\xd3\x14\n\x0e\x43onverterFlags\x12(\n\x0cinput_format\x18\x01 \x01(\x0e\x32\x12.tflite.FileFormat\x12)\n\routput_format\x18\x02 \x01(\x0e\x32\x12.tflite.FileFormat\x12\x30\n\x14inference_input_type\x18\x0b \x01(\x0e\x32\x12.tflite.IODataType\x12*\n\x0einference_type\x18\x04 \x01(\x0e\x32\x12.tflite.IODataType\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_min\x18\x05 \x01(\x02\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_max\x18\x06 \x01(\x02\x12 \n\x18\x64\x65\x66\x61ult_int16_ranges_min\x18\x0f \x01(\x02\x12 \n\x18\x64\x65\x66\x61ult_int16_ranges_max\x18\x10 \x01(\x02\x12\x17\n\x0f\x64rop_fake_quant\x18\x07 \x01(\x08\x12!\n\x19reorder_across_fake_quant\x18\x08 \x01(\x08\x12\x18\n\x10\x61llow_custom_ops\x18\n \x01(\x08\x12\x1f\n\x17\x64rop_control_dependency\x18\x0c \x01(\x08\x12+\n#debug_disable_recurrent_cell_fusion\x18\r \x01(\x08\x12%\n\x1dpropagate_fake_quant_num_bits\x18\x0e \x01(\x08\x12\x35\n-allow_nudging_weights_to_use_fast_gemm_kernel\x18\x11 \x01(\x08\x12\'\n\x1b\x64\x65\x64upe_array_min_size_bytes\x18\x12 \x01(\x03:\x02\x36\x34\x12&\n\x18split_tflite_lstm_inputs\x18\x13 \x01(\x08:\x04true\x12\x1f\n\x10quantize_weights\x18\x14 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x11\x64ump_graphviz_dir\x18\x18 \x01(\t\x12#\n\x1b\x64ump_graphviz_include_video\x18\x19 \x01(\x08\x12%\n\x16post_training_quantize\x18\x1a \x01(\x08:\x05\x66\x61lse\x12#\n\x14\x65nable_select_tf_ops\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_select_tf_ops\x18\x1c \x01(\x08:\x05\x66\x61lse\x12\"\n\x13quantize_to_float16\x18\x1d \x01(\x08:\x05\x66\x61lse\x12#\n\x15\x61llow_dynamic_tensors\x18\x1e 
\x01(\x08:\x04true\x12\x1e\n\x16\x63onversion_summary_dir\x18\x1f \x01(\t\x12\x19\n\rcustom_opdefs\x18 \x03(\tB\x02\x18\x01\x12\x1a\n\x12select_user_tf_ops\x18! \x03(\t\x12.\n enable_tflite_resource_variables\x18\" \x01(\x08:\x04true\x12!\n\x12unfold_batchmatmul\x18# \x01(\x08:\x05\x66\x61lse\x12#\n\x15lower_tensor_list_ops\x18$ \x01(\x08:\x04true\x12-\n\x11\x61\x63\x63umulation_type\x18% \x01(\x0e\x32\x12.tflite.IODataType\x12\x1d\n\x0e\x61llow_bfloat16\x18& \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x17\x61llow_all_select_tf_ops\x18\' \x01(\x08\x12*\n\x1bunfold_large_splat_constant\x18( \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x12supported_backends\x18) \x03(\t\x12\x39\n*default_to_single_batch_in_tensor_list_ops\x18* \x01(\x08:\x05\x66\x61lse\x12/\n disable_per_channel_quantization\x18+ \x01(\x08:\x05\x66\x61lse\x12\x32\n#enable_mlir_dynamic_range_quantizer\x18, \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14tf_quantization_mode\x18- \x01(\t\x12)\n\x1a\x64isable_infer_tensor_range\x18. \x01(\x08:\x05\x66\x61lse\x12&\n\x17use_fake_quant_num_bits\x18/ \x01(\x08:\x05\x66\x61lse\x12*\n\x1b\x65nable_dynamic_update_slice\x18\x30 \x01(\x08:\x05\x66\x61lse\x12!\n\x12preserve_assert_op\x18\x31 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bguarantee_all_funcs_one_use\x18\x32 \x01(\x08:\x05\x66\x61lse\x12#\n\x14\x63onvert_to_stablehlo\x18\x33 \x01(\x08:\x05\x66\x61lse\x12\x30\n!enable_mlir_variable_quantization\x18\x34 \x01(\x08:\x05\x66\x61lse\x12&\n\x17\x64isable_fuse_mul_and_fc\x18\x35 \x01(\x08:\x05\x66\x61lse\x12M\n\x14quantization_options\x18\x36 \x01(\x0b\x32+.stablehlo.quantization.QuantizationOptionsB\x02\x18\x01\x12.\n\x1b\x65nable_hlo_to_tf_conversion\x18\x37 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x39\n\rdebug_options\x18\x38 \x01(\x0b\x32\".tensorflow.converter.DebugOptions\x12 \n\x11use_buffer_offset\x18\x39 \x01(\x08:\x05\x66\x61lse\x12.\n\x1flegalize_custom_tensor_list_ops\x18: \x01(\x08:\x05\x66\x61lse\x12$\n\x15reduce_type_precision\x18; 
\x01(\x08:\x05\x66\x61lse\x12!\n\x13qdq_conversion_mode\x18< \x01(\t:\x04NONE\x12G\n\x13quantization_config\x18= \x01(\x0b\x32*.stablehlo.quantization.QuantizationConfig\x12@\n1disable_per_channel_quantization_for_dense_layers\x18> \x01(\x08:\x05\x66\x61lse\x12/\n enable_composite_direct_lowering\x18? \x01(\x08:\x05\x66\x61lse\x12R\n\x16model_origin_framework\x18@ \x01(\x0e\x32+.tflite.ConverterFlags.ModelOriginFramework:\x05UNSET\x12\x32\n#canonicalizing_inf_as_min_max_float\x18\x41 \x01(\x08:\x05\x66\x61lse\x12\'\n\x18serialize_debug_metadata\x18\x42 \x01(\x08:\x05\x66\x61lse\"R\n\x14ModelOriginFramework\x12\t\n\x05UNSET\x10\x00\x12\x0e\n\nTENSORFLOW\x10\x01\x12\t\n\x05KERAS\x10\x02\x12\x07\n\x03JAX\x10\x03\x12\x0b\n\x07PYTORCH\x10\x04*\\\n\nFileFormat\x12\x17\n\x13\x46ILE_FORMAT_UNKNOWN\x10\x00\x12\x17\n\x13TENSORFLOW_GRAPHDEF\x10\x01\x12\n\n\x06TFLITE\x10\x02\x12\x10\n\x0cGRAPHVIZ_DOT\x10\x03')
|
| 21 |
+
|
| 22 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 23 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.lite.converter_flags_pb2', globals())
|
| 24 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 25 |
+
|
| 26 |
+
DESCRIPTOR._options = None
|
| 27 |
+
_CONVERTERFLAGS.fields_by_name['custom_opdefs']._options = None
|
| 28 |
+
_CONVERTERFLAGS.fields_by_name['custom_opdefs']._serialized_options = b'\030\001'
|
| 29 |
+
_CONVERTERFLAGS.fields_by_name['quantization_options']._options = None
|
| 30 |
+
_CONVERTERFLAGS.fields_by_name['quantization_options']._serialized_options = b'\030\001'
|
| 31 |
+
_CONVERTERFLAGS.fields_by_name['enable_hlo_to_tf_conversion']._options = None
|
| 32 |
+
_CONVERTERFLAGS.fields_by_name['enable_hlo_to_tf_conversion']._serialized_options = b'\030\001'
|
| 33 |
+
_FILEFORMAT._serialized_start=2960
|
| 34 |
+
_FILEFORMAT._serialized_end=3052
|
| 35 |
+
_CONVERTERFLAGS._serialized_start=315
|
| 36 |
+
_CONVERTERFLAGS._serialized_end=2958
|
| 37 |
+
_CONVERTERFLAGS_MODELORIGINFRAMEWORK._serialized_start=2876
|
| 38 |
+
_CONVERTERFLAGS_MODELORIGINFRAMEWORK._serialized_end=2958
|
| 39 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (211 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/__pycache__/debug_options_pb2.cpython-310.pyc
ADDED
|
Binary file (1.23 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/debug/debug_options_pb2.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/compiler/mlir/lite/debug/debug_options.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n7tensorflow/compiler/mlir/lite/debug/debug_options.proto\x12\x14tensorflow.converter\"\x84\x02\n\x0c\x44\x65\x62ugOptions\x12\x15\n\x0bir_dump_dir\x18\x01 \x01(\t:\x00\x12\x1e\n\x12ir_dump_pass_regex\x18\x02 \x01(\t:\x02.*\x12\x1e\n\x12ir_dump_func_regex\x18\x03 \x01(\t:\x02.*\x12\x1c\n\renable_timing\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0fprint_ir_before\x18\x05 \x01(\t:\x00\x12\x18\n\x0eprint_ir_after\x18\x06 \x01(\t:\x00\x12#\n\x15print_ir_module_scope\x18\x07 \x01(\x08:\x04true\x12%\n\x1d\x65lide_elementsattrs_if_larger\x18\x08 \x01(\x03')
|
| 17 |
+
|
| 18 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 19 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.lite.debug.debug_options_pb2', globals())
|
| 20 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 21 |
+
|
| 22 |
+
DESCRIPTOR._options = None
|
| 23 |
+
_DEBUGOPTIONS._serialized_start=82
|
| 24 |
+
_DEBUGOPTIONS._serialized_end=342
|
| 25 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (213 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/__pycache__/converter_error_data_pb2.cpython-310.pyc
ADDED
|
Binary file (2.19 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/metrics/converter_error_data_pb2.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/compiler/mlir/lite/metrics/converter_error_data.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n@tensorflow/compiler/mlir/lite/metrics/converter_error_data.proto\x12\x0etflite.metrics\"\xdc\x06\n\x12\x43onverterErrorData\x12\x11\n\tcomponent\x18\x01 \x01(\t\x12\x14\n\x0csubcomponent\x18\x02 \x01(\t\x12@\n\nerror_code\x18\x03 \x01(\x0e\x32,.tflite.metrics.ConverterErrorData.ErrorCode\x12\x15\n\rerror_message\x18\x04 \x01(\t\x12=\n\x08operator\x18\x05 \x01(\x0b\x32+.tflite.metrics.ConverterErrorData.Operator\x12=\n\x08location\x18\x06 \x01(\x0b\x32+.tflite.metrics.ConverterErrorData.Location\x1a\x18\n\x08Operator\x12\x0c\n\x04name\x18\x01 \x01(\t\x1a\x39\n\x07\x46ileLoc\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x0c\n\x04line\x18\x02 \x01(\r\x12\x0e\n\x06\x63olumn\x18\x03 \x01(\r\x1aU\n\tSourceLoc\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x06source\x18\x02 \x01(\x0b\x32*.tflite.metrics.ConverterErrorData.FileLoc\x1a\x85\x01\n\x08Location\x12=\n\x04type\x18\x01 \x01(\x0e\x32/.tflite.metrics.ConverterErrorData.LocationType\x12:\n\x04\x63\x61ll\x18\x02 \x03(\x0b\x32,.tflite.metrics.ConverterErrorData.SourceLoc\"\xc5\x01\n\tErrorCode\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x18\n\x14\x45RROR_NEEDS_FLEX_OPS\x10\x01\x12\x1a\n\x16\x45RROR_NEEDS_CUSTOM_OPS\x10\x02\x12%\n!ERROR_UNSUPPORTED_CONTROL_FLOW_V1\x10\x03\x12/\n+ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR\x10\x04\x12\x1d\n\x18\x45RROR_GPU_NOT_COMPATIBLE\x10\xc8\x01\"J\n\x0cLocationType\x12\x0e\n\nUNKNOWNLOC\x10\x00\x12\x0b\n\x07NAMELOC\x10\x01\x12\x0f\n\x0b\x43\x41LLSITELOC\x10\x02\x12\x0c\n\x08\x46USEDLOC\x10\x03')
|
| 17 |
+
|
| 18 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 19 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.lite.metrics.converter_error_data_pb2', globals())
|
| 20 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 21 |
+
|
| 22 |
+
DESCRIPTOR._options = None
|
| 23 |
+
_CONVERTERERRORDATA._serialized_start=85
|
| 24 |
+
_CONVERTERERRORDATA._serialized_end=945
|
| 25 |
+
_CONVERTERERRORDATA_OPERATOR._serialized_start=363
|
| 26 |
+
_CONVERTERERRORDATA_OPERATOR._serialized_end=387
|
| 27 |
+
_CONVERTERERRORDATA_FILELOC._serialized_start=389
|
| 28 |
+
_CONVERTERERRORDATA_FILELOC._serialized_end=446
|
| 29 |
+
_CONVERTERERRORDATA_SOURCELOC._serialized_start=448
|
| 30 |
+
_CONVERTERERRORDATA_SOURCELOC._serialized_end=533
|
| 31 |
+
_CONVERTERERRORDATA_LOCATION._serialized_start=536
|
| 32 |
+
_CONVERTERERRORDATA_LOCATION._serialized_end=669
|
| 33 |
+
_CONVERTERERRORDATA_ERRORCODE._serialized_start=672
|
| 34 |
+
_CONVERTERERRORDATA_ERRORCODE._serialized_end=869
|
| 35 |
+
_CONVERTERERRORDATA_LOCATIONTYPE._serialized_start=871
|
| 36 |
+
_CONVERTERERRORDATA_LOCATIONTYPE._serialized_end=945
|
| 37 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/model_flags_pb2.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/compiler/mlir/lite/model_flags.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
from tensorflow.compiler.mlir.lite import types_pb2 as tensorflow_dot_compiler_dot_mlir_dot_lite_dot_types__pb2
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/tensorflow/compiler/mlir/lite/model_flags.proto\x12\x06tflite\x1a)tensorflow/compiler/mlir/lite/types.proto\"5\n\x0fInputArrayShape\x12\x0c\n\x04\x64ims\x18\x02 \x03(\x05\x12\x14\n\x0cunknown_rank\x18\x03 \x01(\x08\"\x93\x01\n\nInputArray\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x05shape\x18\x06 \x01(\x0b\x32\x17.tflite.InputArrayShape\x12\x12\n\nmean_value\x18\x03 \x01(\x02\x12\x14\n\tstd_value\x18\x04 \x01(\x02:\x01\x31\x12%\n\tdata_type\x18\x05 \x01(\x0e\x32\x12.tflite.IODataType\"t\n\x08RnnState\x12\x13\n\x0bstate_array\x18\x01 \x01(\t\x12\x1e\n\x16\x62\x61\x63k_edge_source_array\x18\x02 \x01(\t\x12\x13\n\x0b\x64iscardable\x18\x05 \x01(\x08\x12\x0c\n\x04size\x18\x03 \x01(\x05\x12\x10\n\x08num_dims\x18\x04 \x01(\x05\"\xf5\x01\n\x0f\x41rraysExtraInfo\x12.\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x1d.tflite.ArraysExtraInfo.Entry\x1a\xb1\x01\n\x05\x45ntry\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bname_regexp\x18\x07 \x01(\t\x12\x0b\n\x03min\x18\x02 \x01(\x01\x12\x0b\n\x03max\x18\x03 \x01(\x01\x12%\n\tdata_type\x18\x04 \x01(\x0e\x32\x12.tflite.IODataType\x12&\n\x05shape\x18\x05 \x01(\x0b\x32\x17.tflite.InputArrayShape\x12\x1c\n\x14\x63onstant_float_value\x18\x06 \x01(\x02\"\xd0\x05\n\nModelFlags\x12(\n\x0cinput_arrays\x18\x01 \x03(\x0b\x32\x12.tflite.InputArray\x12\x15\n\routput_arrays\x18\x02 \x03(\t\x12\x1d\n\x15\x63ontrol_output_arrays\x18\x18 \x03(\t\x12\x16\n\x0evariable_batch\x18\n \x01(\x08\x12$\n\nrnn_states\x18\x0c \x03(\x0b\x32\x10.tflite.RnnState\x12\x33\n\x0cmodel_checks\x18\x0e \x03(\x0b\x32\x1d.tflite.ModelFlags.ModelCheck\x12 \n\x18\x61llow_nonexistent_arrays\x18\x10 \x01(\x08\x12\x1d\n\x15\x61llow_nonascii_arrays\x18\x11 \x01(\x08\x12\x32\n\x11\x61rrays_extra_info\x18\x12 \x01(\x0b\x32\x17.tflite.ArraysExtraInfo\x12(\n\x1a\x63hange_concat_input_ranges\x18\x13 \x01(\x08:\x04true\x12\x17\n\x0fsaved_model_dir\x18\x14 \x01(\t\x12\x1b\n\x13saved_model_version\x18\x15 
\x01(\x05\x12\x18\n\x10saved_model_tags\x18\x16 \x03(\t\x12\"\n\x1asaved_model_exported_names\x18\x17 \x03(\t\x12\x16\n\x0euse_hlo_import\x18\x19 \x01(\x08\x12\x35\n\rhlo_file_type\x18\x1a \x01(\x0e\x32\x1e.tflite.ModelFlags.HloFileType\x1aT\n\nModelCheck\x12\x18\n\ncount_type\x18\x01 \x01(\t:\x04None\x12\x15\n\tcount_min\x18\x02 \x01(\x05:\x02-1\x12\x15\n\tcount_max\x18\x03 \x01(\x05:\x02-1\"7\n\x0bHloFileType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08HLO_TEXT\x10\x01\x12\r\n\tHLO_PROTO\x10\x02')
|
| 18 |
+
|
| 19 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 20 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.lite.model_flags_pb2', globals())
|
| 21 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 22 |
+
|
| 23 |
+
DESCRIPTOR._options = None
|
| 24 |
+
_INPUTARRAYSHAPE._serialized_start=102
|
| 25 |
+
_INPUTARRAYSHAPE._serialized_end=155
|
| 26 |
+
_INPUTARRAY._serialized_start=158
|
| 27 |
+
_INPUTARRAY._serialized_end=305
|
| 28 |
+
_RNNSTATE._serialized_start=307
|
| 29 |
+
_RNNSTATE._serialized_end=423
|
| 30 |
+
_ARRAYSEXTRAINFO._serialized_start=426
|
| 31 |
+
_ARRAYSEXTRAINFO._serialized_end=671
|
| 32 |
+
_ARRAYSEXTRAINFO_ENTRY._serialized_start=494
|
| 33 |
+
_ARRAYSEXTRAINFO_ENTRY._serialized_end=671
|
| 34 |
+
_MODELFLAGS._serialized_start=674
|
| 35 |
+
_MODELFLAGS._serialized_end=1394
|
| 36 |
+
_MODELFLAGS_MODELCHECK._serialized_start=1253
|
| 37 |
+
_MODELFLAGS_MODELCHECK._serialized_end=1337
|
| 38 |
+
_MODELFLAGS_HLOFILETYPE._serialized_start=1339
|
| 39 |
+
_MODELFLAGS_HLOFILETYPE._serialized_end=1394
|
| 40 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (212 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/__pycache__/wrap_converter.cpython-310.pyc
ADDED
|
Binary file (2.2 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.pyi
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Type stubs for the native `_pywrap_converter_api` extension module
# (the pybind11 bridge to the TFLite/MLIR converter). Bodies live in C++.

# Runs the full model conversion; returns the converted model (shape of the
# return value is determined by the native implementation).
def Convert(model_flags_proto_txt_raw: object, toco_flags_proto_txt_raw: object, input_contents_txt_raw: object, extended_return: bool = ..., debug_info_txt_raw: object = ..., enable_mlir_converter: bool = ..., quantization_py_function_library = ...) -> object: ...
# Post-training quantization of a serialized flatbuffer via the MLIR pipeline.
def ExperimentalMlirQuantizeModel(input_contents_txt_raw: object, disable_per_channel: bool = ..., fully_quantize: bool = ..., inference_type: int = ..., input_data_type: int = ..., output_data_type: int = ..., enable_numeric_verify: bool = ..., enable_whole_model_verify: bool = ..., op_blocklist: object = ..., node_blocklist: object = ..., enable_variable_quantization: bool = ..., disable_per_channel_for_dense_layers: bool = ..., debug_options_proto_txt_raw: object = ...) -> object: ...
# Sparsification of a serialized flatbuffer via the MLIR pipeline.
def ExperimentalMlirSparsifyModel(input_contents_txt_raw: object) -> object: ...
# Renders a flatbuffer (or, per arg1, a file path to one) as MLIR text.
def FlatBufferToMlir(arg0: str, arg1: bool) -> str: ...
# Registers additional custom op definitions with the converter.
def RegisterCustomOpdefs(custom_opdefs_txt_raw: object) -> object: ...
# Returns errors accumulated by the native converter since the last call.
def RetrieveCollectedErrors() -> list: ...
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.so
ADDED
|
Binary file (228 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/python/wrap_converter.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Wraps TFLite Converter interface with python lazy loader."""
|
| 16 |
+
# We need to import pywrap_tensorflow prior to the converter wrapper.
|
| 17 |
+
# pylint: disable=invalid-import-order,g-bad-import-order
|
| 18 |
+
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
|
| 19 |
+
from tensorflow.compiler.mlir.lite.python import _pywrap_converter_api
|
| 20 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def wrapped_convert(
    model_flags_str,
    toco_flags_str,
    input_data_str,
    debug_info_str,
    enable_mlir_converter,
):
  """Invokes the native converter through the pywrap API.

  Args:
    model_flags_str: Serialized model flags proto text.
    toco_flags_str: Serialized converter flags proto text.
    input_data_str: Input model contents.
    debug_info_str: Serialized debug info proto text.
    enable_mlir_converter: Whether the MLIR-based converter is enabled.

  Returns:
    Whatever the native ``Convert`` entry point returns.
  """
  # A fresh PyFunctionLibrary supplies the Python-side quantization callbacks;
  # extended_return is always disabled by this wrapper.
  function_library = py_function_lib.PyFunctionLibrary()
  return _pywrap_converter_api.Convert(
      model_flags_str,
      toco_flags_str,
      input_data_str,
      False,  # extended_return
      debug_info_str,
      enable_mlir_converter,
      function_library,
  )
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def wrapped_experimental_mlir_quantize(
    input_data_str,
    disable_per_channel,
    fully_quantize,
    inference_type,
    input_data_type,
    output_data_type,
    enable_numeric_verify,
    enable_whole_model_verify,
    denylisted_ops,
    denylisted_nodes,
    enable_variable_quantization,
    disable_per_channel_for_dense_layers,
    debug_options_str,
):
  """Runs the experimental MLIR quantizer over a serialized model.

  All arguments are forwarded positionally, in the exact order the native
  ``ExperimentalMlirQuantizeModel`` entry point expects them.
  """
  forwarded_args = (
      input_data_str,
      disable_per_channel,
      fully_quantize,
      inference_type,
      input_data_type,
      output_data_type,
      enable_numeric_verify,
      enable_whole_model_verify,
      denylisted_ops,
      denylisted_nodes,
      enable_variable_quantization,
      disable_per_channel_for_dense_layers,
      debug_options_str,
  )
  return _pywrap_converter_api.ExperimentalMlirQuantizeModel(*forwarded_args)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def wrapped_experimental_mlir_sparsify(input_data_str):
  """Runs the experimental MLIR sparsifier on a serialized model."""
  # Direct pass-through to the native entry point.
  sparsified_model = _pywrap_converter_api.ExperimentalMlirSparsifyModel(
      input_data_str
  )
  return sparsified_model
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def wrapped_register_custom_opdefs(custom_opdefs_list):
  """Registers custom op definitions with the native converter."""
  registration_result = _pywrap_converter_api.RegisterCustomOpdefs(
      custom_opdefs_list
  )
  return registration_result
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def wrapped_retrieve_collected_errors():
  """Fetches the errors the native converter has collected so far."""
  collected = _pywrap_converter_api.RetrieveCollectedErrors()
  return collected
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def wrapped_flat_buffer_file_to_mlir(model, input_is_filepath):
  """Renders a flatbuffer model as MLIR text.

  When ``input_is_filepath`` is true, ``model`` is presumably treated as a
  path to the flatbuffer rather than its contents (native side decides).
  """
  mlir_text = _pywrap_converter_api.FlatBufferToMlir(model, input_is_filepath)
  return mlir_text
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/lite/types_pb2.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/mlir/lite/types.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# Registers the serialized FileDescriptorProto for types.proto — which
# defines the tflite.IODataType enum — with the default descriptor pool.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)tensorflow/compiler/mlir/lite/types.proto\x12\x06tflite*\xb3\x02\n\nIODataType\x12\x18\n\x14IO_DATA_TYPE_UNKNOWN\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\x13\n\x0fQUANTIZED_UINT8\x10\x02\x12\t\n\x05INT32\x10\x03\x12\t\n\x05INT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x13\n\x0fQUANTIZED_INT16\x10\x06\x12\x08\n\x04\x42OOL\x10\x07\x12\r\n\tCOMPLEX64\x10\x08\x12\x12\n\x0eQUANTIZED_INT8\x10\t\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT64\x10\x0b\x12\x0e\n\nCOMPLEX128\x10\x0c\x12\n\n\x06UINT64\x10\r\x12\x0c\n\x08RESOURCE\x10\x0e\x12\x0b\n\x07VARIANT\x10\x0f\x12\n\n\x06UINT32\x10\x10\x12\t\n\x05UINT8\x10\x11\x12\x08\n\x04INT8\x10\x12\x12\t\n\x05INT16\x10\x13\x12\n\n\x06UINT16\x10\x14')

# Build the Python enum wrappers from the registered descriptor and inject
# them into this module's namespace.
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.lite.types_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors need byte offsets into the serialized file
  # patched in manually; the C implementation derives them itself.
  DESCRIPTOR._options = None
  _IODATATYPE._serialized_start=54
  _IODATATYPE._serialized_end=361
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (213 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (223 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_config_pb2.cpython-310.pyc
ADDED
|
Binary file (6.18 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_options_pb2.cpython-310.pyc
ADDED
|
Binary file (2.97 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/quantization_config_pb2.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# Registers the serialized FileDescriptorProto for quantization_config.proto
# (package stablehlo.quantization) with the default descriptor pool.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nItensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto\x12\x16stablehlo.quantization\"\x1c\n\x0cTfRecordFile\x12\x0c\n\x04path\x18\x01 \x01(\t\"\x8e\x01\n\x1bRepresentativeDatasetConfig\x12\x39\n\ttf_record\x18\x01 \x01(\x0b\x32$.stablehlo.quantization.TfRecordFileH\x00\x12\x1a\n\rsignature_key\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\x06\n\x04\x66ileB\x10\n\x0e_signature_key\"\xc3\x01\n\x14StaticRangePtqPreset\x12T\n\x17representative_datasets\x18\x01 \x03(\x0b\x32\x33.stablehlo.quantization.RepresentativeDatasetConfig\x12/\n#enable_per_channel_quantized_weight\x18\x02 \x01(\x08\x42\x02\x18\x01\x12$\n\x1c\x65nable_full_int_quantization\x18\x03 \x01(\x08\"\x15\n\x13WeightOnlyPtqPreset\"\"\n\x12TfSavedModelConfig\x12\x0c\n\x04tags\x18\x01 \x03(\t\"v\n\x0ePipelineConfig\x12#\n\x16unpack_quantized_types\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12$\n\x1cmerge_fusion_with_dequantize\x18\x02 \x01(\x08\x42\x19\n\x17_unpack_quantized_types\"\x1f\n\x0fQuantizableUnit\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x87\x01\n\x12QuantizationResult\x12\x41\n\x10quantizable_unit\x18\x01 \x01(\x0b\x32\'.stablehlo.quantization.QuantizableUnit\x12.\n\x06method\x18\x02 \x01(\x0b\x32\x1e.stablehlo.quantization.Method\"R\n\x13QuantizationResults\x12;\n\x07results\x18\x01 \x03(\x0b\x32*.stablehlo.quantization.QuantizationResult\":\n\x12QuantizedDimension\x12\x16\n\tdimension\x18\x01 \x01(\x05H\x00\x88\x01\x01\x42\x0c\n\n_dimension\"\x0b\n\tPerTensor\"\x97\x01\n\rQuantizedType\x12\x45\n\x0f\x64imension_specs\x18\x01 \x01(\x0b\x32*.stablehlo.quantization.QuantizedDimensionH\x00\x12\x37\n\nper_tensor\x18\x02 \x01(\x0b\x32!.stablehlo.quantization.PerTensorH\x00\x42\x06\n\x04type\"\x10\n\x0eNoQuantization\"\xd3\x01\n\x0eStaticRangePtq\x12^\n\x15input_quantized_types\x18\x01 \x03(\x0b\x32?.stablehlo.quantization.StaticRangePtq.InputQuantizedTypesEntry\x1a\x61\n\x18InputQuantizedTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.stablehlo.quantization.QuantizedType:\x02\x38\x01\"\xd1\x01\n\rWeightOnlyPtq\x12]\n\x15input_quantized_types\x18\x01 \x03(\x0b\x32>.stablehlo.quantization.WeightOnlyPtq.InputQuantizedTypesEntry\x1a\x61\n\x18InputQuantizedTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.stablehlo.quantization.QuantizedType:\x02\x38\x01\"(\n\x17\x46unctionNameMatcherSpec\x12\r\n\x05regex\x18\x01 \x01(\t\"U\n\x0bMatcherSpec\x12\x46\n\rfunction_name\x18\x01 \x01(\x0b\x32/.stablehlo.quantization.FunctionNameMatcherSpec\"\xdb\x01\n\x06Method\x12\x41\n\x0fno_quantization\x18\x01 \x01(\x0b\x32&.stablehlo.quantization.NoQuantizationH\x00\x12\x42\n\x10static_range_ptq\x18\x02 \x01(\x0b\x32&.stablehlo.quantization.StaticRangePtqH\x00\x12@\n\x0fweight_only_ptq\x18\x03 \x01(\x0b\x32%.stablehlo.quantization.WeightOnlyPtqH\x00\x42\x08\n\x06method\"x\n\x10QuantizationSpec\x12\x34\n\x07matcher\x18\x01 \x01(\x0b\x32#.stablehlo.quantization.MatcherSpec\x12.\n\x06method\x18\x02 \x01(\x0b\x32\x1e.stablehlo.quantization.Method\"L\n\x11QuantizationSpecs\x12\x37\n\x05specs\x18\x01 \x03(\x0b\x32(.stablehlo.quantization.QuantizationSpec\"\xaa\x02\n\x0e\x44\x65\x62uggerConfig\x12J\n\rdebugger_type\x18\x01 \x01(\x0e\x32\x33.stablehlo.quantization.DebuggerConfig.DebuggerType\x12#\n\x1bunquantized_dump_model_path\x18\x02 \x01(\t\x12\x14\n\x0clog_dir_path\x18\x03 \x01(\t\"\x90\x01\n\x0c\x44\x65\x62uggerType\x12\x1d\n\x19\x44\x45\x42UGGER_TYPE_UNSPECIFIED\x10\x00\x12\x1d\n\x19\x44\x45\x42UGGER_TYPE_WHOLE_MODEL\x10\x01\x12\x1f\n\x1b\x44\x45\x42UGGER_TYPE_INT_PER_LAYER\x10\x02\x12!\n\x1d\x44\x45\x42UGGER_TYPE_FLOAT_PER_LAYER\x10\x03\"\x8e\x06\n\x12\x43\x61librationOptions\x12X\n\x12\x63\x61libration_method\x18\x01 \x01(\x0e\x32<.stablehlo.quantization.CalibrationOptions.CalibrationMethod\x12`\n\x16\x63\x61libration_parameters\x18\x02 \x01(\x0b\x32@.stablehlo.quantization.CalibrationOptions.CalibrationParameters\x12T\n\x17representative_datasets\x18\x03 \x03(\x0b\x32\x33.stablehlo.quantization.RepresentativeDatasetConfig\x12\x1c\n\x14\x63\x61libration_data_dir\x18\x04 \x01(\t\x12)\n!force_regenerate_calibration_data\x18\x05 \x01(\x08\x1aY\n\x15\x43\x61librationParameters\x12\x10\n\x08num_bins\x18\x01 \x01(\x05\x12\x16\n\x0emin_percentile\x18\x02 \x01(\x02\x12\x16\n\x0emax_percentile\x18\x03 \x01(\x02\"\xc1\x02\n\x11\x43\x61librationMethod\x12\"\n\x1e\x43\x41LIBRATION_METHOD_UNSPECIFIED\x10\x00\x12\x1e\n\x1a\x43\x41LIBRATION_METHOD_MIN_MAX\x10\x01\x12&\n\"CALIBRATION_METHOD_AVERAGE_MIN_MAX\x10\x02\x12+\n\'CALIBRATION_METHOD_HISTOGRAM_PERCENTILE\x10\x03\x12/\n+CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE\x10\x04\x12\x32\n.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY\x10\x05\x12.\n*CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC\x10\x06\"\xbb\x04\n\x12QuantizationConfig\x12O\n\x17static_range_ptq_preset\x18\x01 \x01(\x0b\x32,.stablehlo.quantization.StaticRangePtqPresetH\x00\x12M\n\x16weight_only_ptq_preset\x18\x07 \x01(\x0b\x32+.stablehlo.quantization.WeightOnlyPtqPresetH\x00\x12\x42\n\x0etf_saved_model\x18\x02 \x01(\x0b\x32*.stablehlo.quantization.TfSavedModelConfig\x12?\n\x0fpipeline_config\x18\x03 \x01(\x0b\x32&.stablehlo.quantization.PipelineConfig\x12\x38\n\x05specs\x18\x04 \x01(\x0b\x32).stablehlo.quantization.QuantizationSpecs\x12?\n\x0f\x64\x65\x62ugger_config\x18\x05 \x01(\x0b\x32&.stablehlo.quantization.DebuggerConfig\x12G\n\x13\x63\x61libration_options\x18\x06 \x01(\x0b\x32*.stablehlo.quantization.CalibrationOptions\x12\x1d\n\x10report_file_path\x18\x08 \x01(\tH\x01\x88\x01\x01\x42\x08\n\x06presetB\x13\n\x11_report_file_pathB\x03\xf8\x01\x01\x62\x06proto3')

# Build the Python message classes from the registered descriptor and inject
# them into this module's namespace.
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.stablehlo.quantization_config_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors need per-message options and byte offsets into
  # the serialized file patched in manually.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\370\001\001'
  _STATICRANGEPTQPRESET.fields_by_name['enable_per_channel_quantized_weight']._options = None
  _STATICRANGEPTQPRESET.fields_by_name['enable_per_channel_quantized_weight']._serialized_options = b'\030\001'
  _STATICRANGEPTQ_INPUTQUANTIZEDTYPESENTRY._options = None
  _STATICRANGEPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_options = b'8\001'
  _WEIGHTONLYPTQ_INPUTQUANTIZEDTYPESENTRY._options = None
  _WEIGHTONLYPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_options = b'8\001'
  _TFRECORDFILE._serialized_start=101
  _TFRECORDFILE._serialized_end=129
  _REPRESENTATIVEDATASETCONFIG._serialized_start=132
  _REPRESENTATIVEDATASETCONFIG._serialized_end=274
  _STATICRANGEPTQPRESET._serialized_start=277
  _STATICRANGEPTQPRESET._serialized_end=472
  _WEIGHTONLYPTQPRESET._serialized_start=474
  _WEIGHTONLYPTQPRESET._serialized_end=495
  _TFSAVEDMODELCONFIG._serialized_start=497
  _TFSAVEDMODELCONFIG._serialized_end=531
  _PIPELINECONFIG._serialized_start=533
  _PIPELINECONFIG._serialized_end=651
  _QUANTIZABLEUNIT._serialized_start=653
  _QUANTIZABLEUNIT._serialized_end=684
  _QUANTIZATIONRESULT._serialized_start=687
  _QUANTIZATIONRESULT._serialized_end=822
  _QUANTIZATIONRESULTS._serialized_start=824
  _QUANTIZATIONRESULTS._serialized_end=906
  _QUANTIZEDDIMENSION._serialized_start=908
  _QUANTIZEDDIMENSION._serialized_end=966
  _PERTENSOR._serialized_start=968
  _PERTENSOR._serialized_end=979
  _QUANTIZEDTYPE._serialized_start=982
  _QUANTIZEDTYPE._serialized_end=1133
  _NOQUANTIZATION._serialized_start=1135
  _NOQUANTIZATION._serialized_end=1151
  _STATICRANGEPTQ._serialized_start=1154
  _STATICRANGEPTQ._serialized_end=1365
  _STATICRANGEPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_start=1268
  _STATICRANGEPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_end=1365
  _WEIGHTONLYPTQ._serialized_start=1368
  _WEIGHTONLYPTQ._serialized_end=1577
  _WEIGHTONLYPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_start=1268
  _WEIGHTONLYPTQ_INPUTQUANTIZEDTYPESENTRY._serialized_end=1365
  _FUNCTIONNAMEMATCHERSPEC._serialized_start=1579
  _FUNCTIONNAMEMATCHERSPEC._serialized_end=1619
  _MATCHERSPEC._serialized_start=1621
  _MATCHERSPEC._serialized_end=1706
  _METHOD._serialized_start=1709
  _METHOD._serialized_end=1928
  _QUANTIZATIONSPEC._serialized_start=1930
  _QUANTIZATIONSPEC._serialized_end=2050
  _QUANTIZATIONSPECS._serialized_start=2052
  _QUANTIZATIONSPECS._serialized_end=2128
  _DEBUGGERCONFIG._serialized_start=2131
  _DEBUGGERCONFIG._serialized_end=2429
  _DEBUGGERCONFIG_DEBUGGERTYPE._serialized_start=2285
  _DEBUGGERCONFIG_DEBUGGERTYPE._serialized_end=2429
  _CALIBRATIONOPTIONS._serialized_start=2432
  _CALIBRATIONOPTIONS._serialized_end=3214
  _CALIBRATIONOPTIONS_CALIBRATIONPARAMETERS._serialized_start=2801
  _CALIBRATIONOPTIONS_CALIBRATIONPARAMETERS._serialized_end=2890
  _CALIBRATIONOPTIONS_CALIBRATIONMETHOD._serialized_start=2893
  _CALIBRATIONOPTIONS_CALIBRATIONMETHOD._serialized_end=3214
  _QUANTIZATIONCONFIG._serialized_start=3217
  _QUANTIZATIONCONFIG._serialized_end=3788
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/quantization_options_pb2.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# Registers the serialized FileDescriptorProto for quantization_options.proto
# (package stablehlo.quantization) with the default descriptor pool.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nJtensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto\x12\x16stablehlo.quantization\"^\n\x13QuantizationOptions\x12G\n\x13quantization_method\x18\x01 \x01(\x0b\x32*.stablehlo.quantization.QuantizationMethod\"\xdb\x01\n\x12QuantizationMethod\x12V\n\x1apreset_quantization_method\x18\x01 \x01(\x0b\x32\x30.stablehlo.quantization.PresetQuantizationMethodH\x00\x12V\n\x1a\x63ustom_quantization_method\x18\x02 \x01(\x0b\x32\x30.stablehlo.quantization.CustomQuantizationMethodH\x00\x42\x15\n\x13quantization_method\"\x92\x02\n\x18PresetQuantizationMethod\x12T\n\rpreset_method\x18\x01 \x01(\x0e\x32=.stablehlo.quantization.PresetQuantizationMethod.PresetMethod\"\x9f\x01\n\x0cPresetMethod\x12\x16\n\x12METHOD_UNSPECIFIED\x10\x00\x12\x0f\n\x0bWEIGHT_ONLY\x10\x01\x12,\n(POST_TRAINING_QUANTIZATION_DYNAMIC_RANGE\x10\x02\x12\x0b\n\x07\x46LOAT16\x10\x03\x12+\n\'POST_TRAINING_QUANTIZATION_STATIC_RANGE\x10\x04\"r\n\x18\x43ustomQuantizationMethod\x12V\n\x1bquantization_component_spec\x18\x01 \x03(\x0b\x32\x31.stablehlo.quantization.QuantizationComponentSpec\"\xc5\x05\n\x19QuantizationComponentSpec\x12g\n\x16quantization_component\x18\x01 \x01(\x0e\x32G.stablehlo.quantization.QuantizationComponentSpec.QuantizationComponent\x12M\n\tbit_width\x18\x02 \x01(\x0e\x32:.stablehlo.quantization.QuantizationComponentSpec.BitWidth\x12K\n\x08\x62it_type\x18\x03 \x01(\x0e\x32\x39.stablehlo.quantization.QuantizationComponentSpec.BitType\x12\x1b\n\x13\x65nable_narrow_range\x18\x04 \x01(\x08\x12\'\n\x1f\x65nable_per_channel_quantization\x18\x05 \x01(\x08\x12\x18\n\x10\x65nable_symmetric\x18\x06 \x01(\x08\"v\n\x15QuantizationComponent\x12\x19\n\x15\x43OMPONENT_UNSPECIFIED\x10\x00\x12\x18\n\x14\x43OMPONENT_ACTIVATION\x10\x01\x12\x14\n\x10\x43OMPONENT_WEIGHT\x10\x02\x12\x12\n\x0e\x43OMPONENT_BIAS\x10\x03\"k\n\x08\x42itWidth\x12\x19\n\x15\x42IT_WIDTH_UNSPECIFIED\x10\x00\x12\x0f\n\x0b\x42IT_WIDTH_4\x10\x01\x12\x0f\n\x0b\x42IT_WIDTH_8\x10\x02\x12\x10\n\x0c\x42IT_WIDTH_16\x10\x03\x12\x10\n\x0c\x42IT_WIDTH_32\x10\x04\"^\n\x07\x42itType\x12\x18\n\x14\x42IT_TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0c\x42IT_TYPE_INT\x10\x01\x12\x12\n\x0e\x42IT_TYPE_FLOAT\x10\x02\x12\x13\n\x0f\x42IT_TYPE_BFLOAT\x10\x03\x42\x03\xf8\x01\x01\x62\x06proto3')

# Build the Python message classes from the registered descriptor and inject
# them into this module's namespace.
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.stablehlo.quantization_options_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors need file options and byte offsets into the
  # serialized file patched in manually.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\370\001\001'
  _QUANTIZATIONOPTIONS._serialized_start=102
  _QUANTIZATIONOPTIONS._serialized_end=196
  _QUANTIZATIONMETHOD._serialized_start=199
  _QUANTIZATIONMETHOD._serialized_end=418
  _PRESETQUANTIZATIONMETHOD._serialized_start=421
  _PRESETQUANTIZATIONMETHOD._serialized_end=695
  _PRESETQUANTIZATIONMETHOD_PRESETMETHOD._serialized_start=536
  _PRESETQUANTIZATIONMETHOD_PRESETMETHOD._serialized_end=695
  _CUSTOMQUANTIZATIONMETHOD._serialized_start=697
  _CUSTOMQUANTIZATIONMETHOD._serialized_end=811
  _QUANTIZATIONCOMPONENTSPEC._serialized_start=814
  _QUANTIZATIONCOMPONENTSPEC._serialized_end=1523
  _QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_start=1200
  _QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_end=1318
  _QUANTIZATIONCOMPONENTSPEC_BITWIDTH._serialized_start=1320
  _QUANTIZATIONCOMPONENTSPEC_BITWIDTH._serialized_end=1427
  _QUANTIZATIONCOMPONENTSPEC_BITTYPE._serialized_start=1429
  _QUANTIZATIONCOMPONENTSPEC_BITTYPE._serialized_end=1523
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (224 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/exported_model_pb2.cpython-310.pyc
ADDED
|
Binary file (2.02 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__pycache__/quantization_options_pb2.cpython-310.pyc
ADDED
|
Binary file (4.68 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (235 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/calibration_algorithm.cpython-310.pyc
ADDED
|
Binary file (13.2 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__pycache__/calibration_statistics_pb2.cpython-310.pyc
ADDED
|
Binary file (2.22 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Defines CalibrationAlgorithm for calculating min and max values calculated by calibration method."""
|
| 16 |
+
import abc
|
| 17 |
+
import itertools
|
| 18 |
+
import logging
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as stablehlo_quant_config_pb2
|
| 23 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import calibration_statistics_pb2 as calib_stats_pb2
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Shorthand for the CalibrationMethod enum nested in CalibrationOptions.
_CalibrationMethod = (
    stablehlo_quant_config_pb2.CalibrationOptions.CalibrationMethod
)
# Maps a CalibrationMethod enum value to the algorithm class implementing it;
# populated by the @_implements class decorator below.
_REGISTRY = {}
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _implements(calib_method: _CalibrationMethod):
  """Class decorator registering a calibration algorithm implementation.

  Args:
    calib_method: The CalibrationMethod enum value the decorated class
      implements.

  Returns:
    A decorator that records the class in ``_REGISTRY`` and returns it
    unchanged.

  Raises:
    ValueError: If ``calib_method`` is already registered.
  """

  def decorator(cls):
    # Fail loudly on duplicate registration. A bare ``assert`` would be
    # stripped under ``python -O`` and silently allow an overwrite.
    if calib_method in _REGISTRY:
      raise ValueError(
          f'Calibration method is already registered: {calib_method}'
      )
    _REGISTRY[calib_method] = cls
    return cls

  return decorator
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class _CalibrationAlgorithmBase(abc.ABC):
  """Common interface for min/max calibration algorithms."""

  def __init__(
      self,
      statistics: calib_stats_pb2.CalibrationStatistics,
      calib_opts: stablehlo_quant_config_pb2.CalibrationOptions,
  ):
    # Retain both inputs for use by concrete subclasses.
    self._statistics = statistics
    self._calib_opts = calib_opts

  @abc.abstractmethod
  def get_min_max_value(self) -> tuple[float, float]:
    """Returns the (min, max) range derived from the collected statistics."""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class _HistogramCalibrationAlgorithmBase(_CalibrationAlgorithmBase):
  """Shared logic for calibration algorithms that operate on a histogram."""

  def __init__(
      self,
      statistics: calib_stats_pb2.CalibrationStatistics,
      calib_opts: stablehlo_quant_config_pb2.CalibrationOptions,
  ):
    """Caches the histogram fields and precomputes the bin midpoints.

    The i-th bin covers the half-open range
    [lower_bound + i * bin_width, lower_bound + (i + 1) * bin_width),
    so its midpoint is lower_bound + bin_width / 2 + i * bin_width.

    Args:
      statistics: Collected calibration statistics.
      calib_opts: Calibration options used for calculating min and max.
    """
    super().__init__(statistics, calib_opts)
    hist_stats = statistics.histogram_statistics
    self._bin_width = hist_stats.bin_width
    self._lower_bound = hist_stats.lower_bound
    self._hist_freq = np.array(hist_stats.hist_freq)
    self._num_bins = len(self._hist_freq)
    # Quantized codes are assumed to be 8-bit, i.e. [0, 255].
    self._num_bits = 8
    lowest_mid = self._lower_bound + self._bin_width / 2
    highest_mid = lowest_mid + (self._num_bins - 1) * self._bin_width
    self._hist_mids = np.linspace(lowest_mid, highest_mid, self._num_bins)

  def _get_dequantized_hist_mids_after_quantize(
      self, quant_min: float, quant_max: float
  ) -> np.ndarray:
    """Round-trips the bin midpoints through affine quantization.

    Values are mapped from [quant_min, quant_max] onto the integer codes
    [0, 2^num_bits - 1] (saturating below quant_min at 0 and above quant_max
    at the top code) and then mapped back. The gap between each midpoint and
    its round-tripped value is the quantization error the candidate pair
    (quant_min, quant_max) would incur for that bin.

    Args:
      quant_min: The minimum real value that can be represented by a
        quantized value.
      quant_max: The maximum real value that can be represented by a
        quantized value.

    Returns:
      Dequantized bin midpoints after quantizing with quant_min/quant_max.
    """
    upper_code = 2**self._num_bits - 1
    lower_code = 0
    scale = (quant_max - quant_min) / upper_code
    zero_point = -quant_min / scale

    # Keep zero_point and scale within sane bounds in case
    # (quant_max - quant_min) is unusually small.
    if abs(zero_point) > 9e9:
      zero_point = 9e9
    if abs(scale) < 1e-9:
      scale = 1e-9

    zero_point = round(zero_point)
    quantized_mids = np.clip(
        np.round(self._hist_mids / scale) + zero_point, lower_code, upper_code
    )
    return scale * (quantized_mids - zero_point)

  def _get_weighted_mean_squared_error(
      self, quant_min, quant_max
  ) -> tuple[float, float, float]:
    """Scores a (quant_min, quant_max) pair by frequency-weighted error.

    Args:
      quant_min: The minimum real value that can be represented by a
        quantized value.
      quant_max: The maximum real value that can be represented by a
        quantized value.

    Returns:
      (error, quant_min, quant_max) where
      error = sum((hist_mids - dequantized_hist_mids)**2 * hist_freq).
      The candidate bounds are carried along so tuples can be compared
      and minimized directly.
    """
    roundtripped = self._get_dequantized_hist_mids_after_quantize(
        quant_min, quant_max
    )
    error = np.sum(((self._hist_mids - roundtripped) ** 2) * self._hist_freq)
    return (error, quant_min, quant_max)

  def _get_min_max_value_by_expanding_range(
      self, start_idx: int
  ) -> tuple[float, float]:
    """Expands a window around start_idx and keeps the lowest-MSE bounds.

    The window [left, right] grows one bin at a time, alternating between
    the left and right edges (falling back to the opposite side once an edge
    reaches the histogram boundary), and every intermediate window is scored.

    Args:
      start_idx: Bin index at which the search window starts.

    Returns:
      (min_value, max_value): The window bounds with the smallest weighted
      mean squared error.
    """
    last_bin = self._num_bins - 1
    # Best (mse_error, quant_min, quant_max) seen so far.
    best = (float('inf'), float('inf'), float('inf'))
    left = right = start_idx

    # Alternation flag: step the left edge when True, the right when False.
    expand_left = True
    while left > 0 or right < last_bin:
      # Step left when it is this side's turn (or the right edge is already
      # pinned at the boundary); otherwise step right.
      if (expand_left and left > 0) or right == last_bin:
        left = max(left - 1, 0)
      else:
        right = min(right + 1, last_bin)
      expand_left = not expand_left
      candidate = self._get_weighted_mean_squared_error(
          self._hist_mids[left], self._hist_mids[right]
      )
      best = min(candidate, best)
    # Drop the error component; keep (quant_min, quant_max).
    return best[1], best[2]
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX)
class _MinMax(_CalibrationAlgorithmBase):
  """Calibration algorithm that uses the global min/max of the samples.

  The range is simply the smallest and largest values observed across all
  sample inputs.
  """

  def get_min_max_value(self) -> tuple[float, float]:
    """Returns the globally observed (min, max) pair.

    Returns:
      (min_value, max_value): Min and max calculated using MinMax
    """
    min_max_stats = self._statistics.min_max_statistics
    return (min_max_stats.global_min, min_max_stats.global_max)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX)
class _AverageMinMax(_CalibrationAlgorithmBase):
  """Calibration algorithm that averages per-sample min/max values.

  average of min = sum of min values / number of samples
  average of max = sum of max values / number of samples
  """

  def get_min_max_value(self) -> tuple[float, float]:
    """Returns the per-sample averages of min and max.

    Returns:
      (min_value, max_value): Min and max calculated using AverageMinMax

    Raises:
      ValueError: num_samples is 0.
    """
    stats = self._statistics.average_min_max_statistics

    # The collector is expected to reject empty calibration data, but guard
    # against division by zero anyway.
    num_samples = stats.num_samples
    if num_samples == 0:
      raise ValueError(
          'num_samples must not be 0 when calibration method is'
          f' AverageMinMax: {self._calib_opts}'
      )

    return stats.min_sum / num_samples, stats.max_sum / num_samples
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE)
class _HistogramPercentile(_HistogramCalibrationAlgorithmBase):
  """Calibration algorithm that trims the histogram at two percentiles."""

  def get_min_max_value(self) -> tuple[float, float]:
    """Picks the bin midpoints at min_percentile and max_percentile.

    A percentile is the value below which the given percentage of the data
    falls; e.g. the 0.01 percentile is the value separating the lowest 0.01%
    of the dataset. min_percentile and max_percentile come from the
    calibration options, must lie in [0, 100], and default to 0.001 and
    99.999 respectively.

    Returns:
      (min_value, max_value): Min and max calculated using HistogramPercentile
    """
    # Normalized cumulative distribution over the bins; values lie in [0, 1].
    cumulative = np.cumsum(self._hist_freq) / sum(self._hist_freq)

    params = self._calib_opts.calibration_parameters
    # Percentiles are expressed in [0, 100]; quantiles live in [0, 1].
    low_quantile = params.min_percentile / 100.0
    high_quantile = params.max_percentile / 100.0

    # Locate the bins whose cumulative mass crosses each quantile.
    low_idx = np.searchsorted(cumulative, low_quantile, side='right')
    high_idx = np.searchsorted(cumulative, high_quantile, side='left')

    return self._hist_mids[low_idx], self._hist_mids[high_idx]
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE)
class _HistogramMseBruteforce(_HistogramCalibrationAlgorithmBase):
  """Calibration algorithm that exhaustively searches all midpoint pairs."""

  def get_min_max_value(self) -> tuple[float, float]:
    """Scores every (left, right) midpoint pair and returns the best one.

    This is optimal with respect to the representative dataset (not
    necessarily the test dataset), at O(num_bins^2) cost.

    Returns:
      (min_value, max_value): Min and max calculated using
      HistogramMseBruteforce.
    """
    if self._num_bins > 512:
      logging.warning(
          'num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests'
          ' all histogram mid value pairs, so it may take a long time.',
          self._num_bins,
      )

    # Best (mse_error, quant_min, quant_max) seen so far.
    best = (float('inf'), float('inf'), float('inf'))
    for lo_idx, hi_idx in itertools.combinations(range(self._num_bins), 2):
      candidate = self._get_weighted_mean_squared_error(
          self._hist_mids[lo_idx], self._hist_mids[hi_idx]
      )
      best = min(candidate, best)

    return best[1], best[2]
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY)
class _HistogramMseMaxFrequency(_HistogramCalibrationAlgorithmBase):
  """MSE calibration that expands outward from the most frequent bin."""

  def get_min_max_value(self) -> tuple[float, float]:
    """Expands the search window from the bin with the highest frequency.

    Works well when the data spreads to both sides of its mode.

    Returns:
      (min_value, max_value): Min and max calculated using method to expand
      the range based on max frequency.
    """
    peak_idx = np.argmax(self._hist_freq)
    return self._get_min_max_value_by_expanding_range(peak_idx)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC)
class _HistogramMseSymmetric(_HistogramCalibrationAlgorithmBase):
  """MSE calibration that expands outward from the center bin."""

  def get_min_max_value(self) -> tuple[float, float]:
    """Expands the search window from the middle of the histogram.

    Works better when the data is well-centered.

    Returns:
      (min_value, max_value): Min and max calculated using the method
      starting from center and expanding.
    """
    center_idx = self._num_bins // 2
    return self._get_min_max_value_by_expanding_range(center_idx)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def get_min_max_value(
    statistics: calib_stats_pb2.CalibrationStatistics,
    calib_opts: stablehlo_quant_config_pb2.CalibrationOptions,
) -> tuple[float, float]:
  """Dispatches to the registered calibration algorithm and runs it.

  Args:
    statistics: Collected calibration statistics.
    calib_opts: Calibration options used for calculating min and max.

  Returns:
    (min_value, max_value): Min and max calculated using calib_opts.

  Raises:
    ValueError: Unsupported calibration method is given.
  """
  calib_method = calib_opts.calibration_method
  if calib_method not in _REGISTRY:
    raise ValueError(f'Unsupported calibration method: {calib_method}')

  algorithm = _REGISTRY[calib_method](statistics, calib_opts)
  return algorithm.get_min_max_value()
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_pb2.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.proto
# NOTE(review): machine-generated module — change the .proto and regenerate
# rather than hand-editing this file.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




# Serialized FileDescriptorProto for calibration_statistics.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nXtensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.proto\x12\x15tensorflow.calibrator\"\x9c\x04\n\x15\x43\x61librationStatistics\x12Y\n\x12min_max_statistics\x18\x01 \x01(\x0b\x32=.tensorflow.calibrator.CalibrationStatistics.MinMaxStatistics\x12h\n\x1a\x61verage_min_max_statistics\x18\x02 \x01(\x0b\x32\x44.tensorflow.calibrator.CalibrationStatistics.AverageMinMaxStatistics\x12^\n\x14histogram_statistics\x18\x03 \x01(\x0b\x32@.tensorflow.calibrator.CalibrationStatistics.HistogramStatistics\x1a:\n\x10MinMaxStatistics\x12\x12\n\nglobal_min\x18\x01 \x01(\x02\x12\x12\n\nglobal_max\x18\x02 \x01(\x02\x1aP\n\x17\x41verageMinMaxStatistics\x12\x0f\n\x07min_sum\x18\x01 \x01(\x02\x12\x0f\n\x07max_sum\x18\x02 \x01(\x02\x12\x13\n\x0bnum_samples\x18\x03 \x01(\x05\x1aP\n\x13HistogramStatistics\x12\x11\n\tbin_width\x18\x01 \x01(\x02\x12\x13\n\x0blower_bound\x18\x02 \x01(\x02\x12\x11\n\thist_freq\x18\x03 \x03(\x02\"\xd0\x01\n\x18\x43\x61librationStatisticsMap\x12S\n\nstatistics\x18\x01 \x03(\x0b\x32?.tensorflow.calibrator.CalibrationStatisticsMap.StatisticsEntry\x1a_\n\x0fStatisticsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.tensorflow.calibrator.CalibrationStatistics:\x02\x38\x01\x42\x03\xf8\x01\x01\x62\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.calibrator.calibration_statistics_pb2', globals())
# Byte offsets below are only needed for the pure-Python descriptor path.
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\370\001\001'
  _CALIBRATIONSTATISTICSMAP_STATISTICSENTRY._options = None
  _CALIBRATIONSTATISTICSMAP_STATISTICSENTRY._serialized_options = b'8\001'
  _CALIBRATIONSTATISTICS._serialized_start=116
  _CALIBRATIONSTATISTICS._serialized_end=656
  _CALIBRATIONSTATISTICS_MINMAXSTATISTICS._serialized_start=434
  _CALIBRATIONSTATISTICS_MINMAXSTATISTICS._serialized_end=492
  _CALIBRATIONSTATISTICS_AVERAGEMINMAXSTATISTICS._serialized_start=494
  _CALIBRATIONSTATISTICS_AVERAGEMINMAXSTATISTICS._serialized_end=574
  _CALIBRATIONSTATISTICS_HISTOGRAMSTATISTICS._serialized_start=576
  _CALIBRATIONSTATISTICS_HISTOGRAMSTATISTICS._serialized_end=656
  _CALIBRATIONSTATISTICSMAP._serialized_start=659
  _CALIBRATIONSTATISTICSMAP._serialized_end=867
  _CALIBRATIONSTATISTICSMAP_STATISTICSENTRY._serialized_start=772
  _CALIBRATIONSTATISTICSMAP_STATISTICSENTRY._serialized_end=867
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/exported_model_pb2.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/mlir/quantization/tensorflow/exported_model.proto
# NOTE(review): machine-generated module — change the .proto and regenerate
# rather than hand-editing this file.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.protobuf import meta_graph_pb2 as tensorflow_dot_core_dot_protobuf_dot_meta__graph__pb2
from tensorflow.core.protobuf import saver_pb2 as tensorflow_dot_core_dot_protobuf_dot_saver__pb2


# Serialized FileDescriptorProto for exported_model.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nEtensorflow/compiler/mlir/quantization/tensorflow/exported_model.proto\x12\x17tensorflow.quantization\x1a%tensorflow/core/framework/graph.proto\x1a)tensorflow/core/protobuf/meta_graph.proto\x1a$tensorflow/core/protobuf/saver.proto\"\xbe\x03\n\rExportedModel\x12\'\n\tgraph_def\x18\x01 \x01(\x0b\x32\x14.tensorflow.GraphDef\x12\x16\n\x0einit_node_name\x18\x02 \x01(\t\x12\x16\n\x0e\x63heckpoint_dir\x18\x05 \x01(\t\x12U\n\x10\x66unction_aliases\x18\x06 \x03(\x0b\x32;.tensorflow.quantization.ExportedModel.FunctionAliasesEntry\x12\x31\n\x0f\x61sset_file_defs\x18\x08 \x03(\x0b\x32\x18.tensorflow.AssetFileDef\x12\'\n\tsaver_def\x18\n \x01(\x0b\x32\x14.tensorflow.SaverDef\x1a\x36\n\x14\x46unctionAliasesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x07\x10\x08J\x04\x08\t\x10\nR\x15variable_shared_namesR\x11restore_node_nameR\x0esave_node_nameR\x17\x66ile_prefix_tensor_nameB\x03\xf8\x01\x01\x62\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.exported_model_pb2', globals())
# Byte offsets below are only needed for the pure-Python descriptor path.
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\370\001\001'
  _EXPORTEDMODEL_FUNCTIONALIASESENTRY._options = None
  _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_options = b'8\001'
  _EXPORTEDMODEL._serialized_start=219
  _EXPORTEDMODEL._serialized_end=665
  _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_start=504
  _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_end=558
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.pyi
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Type stubs for the pybind-exported quantization entry points; the
# implementations live in native code. The LINT markers tie each signature
# to its C++ counterpart — keep them in sync.
from typing import Any

from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import calibration_statistics_pb2
from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib
from tensorflow.compiler.mlir.quantization.tensorflow.python import representative_dataset as rd

# Quantization-aware-training path: src -> dst SavedModel.
# LINT.IfChange(quantize_qat_model)
def quantize_qat_model(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options_serialized: bytes,
    *,
    signature_keys: list[str],
    signature_def_map_serialized: dict[str, bytes],
    py_function_library: py_function_lib.PyFunctionLibrary,
) -> Any: ...  # Status

# LINT.ThenChange()

# Post-training dynamic-range quantization path.
# LINT.IfChange(quantize_ptq_dynamic_range)
def quantize_ptq_dynamic_range(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options_serialized: bytes,
    *,
    signature_keys: list[str],
    signature_def_map_serialized: dict[str, bytes],
    py_function_library: py_function_lib.PyFunctionLibrary,
) -> Any: ...  # Status

# LINT.ThenChange()

# Weight-only quantization path (no signature_keys parameter).
# LINT.IfChange(quantize_weight_only)
def quantize_weight_only(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options_serialized: bytes,
    *,
    signature_def_map_serialized: dict[str, bytes],
    py_function_library: py_function_lib.PyFunctionLibrary,
) -> Any: ...  # Status

# LINT.ThenChange()

# Post-training static-range quantization path; additionally takes a
# representative dataset for calibration.
# LINT.IfChange(quantize_ptq_static_range)
def quantize_ptq_static_range(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options_serialized: bytes,
    *,
    signature_keys: list[str],
    signature_def_map_serialized: dict[str, bytes],
    py_function_library: py_function_lib.PyFunctionLibrary,
    # Value type: RepresentativeDatasetFile.
    representative_dataset_file_map_serialized: dict[str, bytes],
) -> Any: ...  # Status

# LINT.ThenChange()
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
ADDED
|
@@ -0,0 +1,926 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Defines TF Quantization API from SavedModel to SavedModel."""
|
| 16 |
+
|
| 17 |
+
import tempfile
|
| 18 |
+
from typing import Mapping, Optional
|
| 19 |
+
|
| 20 |
+
from absl import logging
|
| 21 |
+
|
| 22 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as stablehlo_quant_config_pb2
|
| 23 |
+
from tensorflow.compiler.mlir.quantization.tensorflow import quantization_options_pb2 as quant_opts_pb2
|
| 24 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib
|
| 25 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.python import pywrap_quantize_model
|
| 26 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.python import representative_dataset as repr_dataset
|
| 27 |
+
from tensorflow.compiler.mlir.quantization.tensorflow.python import save_model
|
| 28 |
+
from tensorflow.core.protobuf import meta_graph_pb2
|
| 29 |
+
from tensorflow.python.lib.io import file_io
|
| 30 |
+
from tensorflow.python.saved_model import load as saved_model_load
|
| 31 |
+
from tensorflow.python.saved_model import loader_impl as saved_model_loader
|
| 32 |
+
from tensorflow.python.saved_model import signature_constants
|
| 33 |
+
from tensorflow.python.saved_model import tag_constants
|
| 34 |
+
from tensorflow.python.trackable import autotrackable
|
| 35 |
+
from tensorflow.python.util import tf_export
|
| 36 |
+
|
| 37 |
+
# Type aliases for quant_opts_pb2 messages.
# NOTE: `tf_export.tf_export(name)(cls)` is applied as a decorator call; it
# returns the class itself, so each alias below is the proto message class,
# additionally exposed under the public `tf.quantization.experimental.*` name.
_QuantizationOptions = tf_export.tf_export(
    'quantization.experimental.QuantizationOptions'
)(quant_opts_pb2.QuantizationOptions)

_QuantizationMethod = tf_export.tf_export(
    'quantization.experimental.QuantizationMethod'
)(quant_opts_pb2.QuantizationMethod)

_QuantizationComponentSpec = tf_export.tf_export(
    'quantization.experimental.QuantizationComponentSpec'
)(quant_opts_pb2.QuantizationComponentSpec)

_UnitWiseQuantizationSpec = tf_export.tf_export(
    'quantization.experimental.UnitWiseQuantizationSpec'
)(quant_opts_pb2.UnitWiseQuantizationSpec)

# Shorthand aliases for nested enum types used throughout this module.
_PresetMethod = _QuantizationMethod.PresetMethod
_CalibrationMethod = (
    stablehlo_quant_config_pb2.CalibrationOptions.CalibrationMethod
)

_QuantizationComponent = _QuantizationComponentSpec.QuantizationComponent
_TensorType = _QuantizationComponentSpec.TensorType

_RepresentativeDatasetFile = quant_opts_pb2.RepresentativeDatasetFile

# Mapping of signature def key -> SignatureDef.
_SignatureDefMap = Mapping[str, meta_graph_pb2.SignatureDef]

# Default minimum number of elements in the weights for them to be quantized
# during dynamic range quantization (DRQ) and weight-only quantization.
_DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS = 1024
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _is_qat_saved_model(saved_model_path: str):
  """Checks if the SavedModel is QAT-enabled by looking for 'FakeQuant' ops."""
  model_proto = saved_model_loader.parse_saved_model(saved_model_path)
  for meta_graph in model_proto.meta_graphs:
    graph_def = meta_graph.graph_def
    # Scan the top-level graph nodes first, then the nodes of every function
    # in the graph's function library.
    node_collections = [graph_def.node]
    node_collections.extend(fn.node_def for fn in graph_def.library.function)
    for nodes in node_collections:
      if any(node.op.startswith('FakeQuant') for node in nodes):
        return True
  return False
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _serialize_signature_def_map(
    signature_def_map: _SignatureDefMap,
) -> dict[str, bytes]:
  """Serializes each SignatureDef value in `signature_def_map`.

  Args:
    signature_def_map: Signature key -> SignatureDef mapping.

  Returns:
    Signature def map where the values (`SignatureDef`) are serialized.
  """
  return {
      key: signature_def.SerializeToString()
      for key, signature_def in signature_def_map.items()
  }
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _save_representative_dataset(
    representative_dataset: repr_dataset.RepresentativeDatasetOrMapping,
    signature_def_map: _SignatureDefMap,
) -> Mapping[str, _RepresentativeDatasetFile]:
  """Saves the representative dataset to temporary TFRecord files.

  Args:
    representative_dataset: Representative dataset used for the calibration
      step. Representative datasets should exist for each signature def key in
      `signature_def_keys`.
    signature_def_map: Signature def key -> SignatureDef mapping.

  Returns:
    A map from signature key to the saved representative dataset file.
  """
  if isinstance(representative_dataset, Mapping):
    # A dataset map must cover exactly the provided signature keys.
    if set(signature_def_map.keys()) != set(representative_dataset.keys()):
      raise ValueError(
          'The signature keys and the keys of representative dataset map '
          f'do not match. Signature keys: {set(signature_def_map.keys())}, '
          f'representative dataset map: {set(representative_dataset.keys())}.'
      )
    dataset_map = representative_dataset
  else:
    # A single (non-mapping) dataset is only unambiguous when there is
    # exactly one signature key.
    if len(signature_def_map.keys()) > 1:
      raise ValueError(
          'Representative dataset is not a mapping (got: '
          f'{type(representative_dataset)}), but there is more than one '
          'signature key provided. Please provide a map of '
          '{signature_key -> dataset} with more than one signature key.'
      )
    dataset_map = {list(signature_def_map.keys())[0]: representative_dataset}

  # One temporary TFRecord file per signature key; mkstemp returns
  # (fd, filepath) and only the filepath is kept.
  tfrecord_path_map = {}
  input_key_map = {}
  for sig_key, sig_def in signature_def_map.items():
    _, tfrecord_path_map[sig_key] = tempfile.mkstemp(
        suffix='.tfrecord', prefix=sig_key
    )
    input_key_map[sig_key] = sig_def.inputs.keys()

  saver = repr_dataset.TfRecordRepresentativeDatasetSaver(
      path_map=tfrecord_path_map,
      expected_input_key_map=input_key_map,
  )
  return saver.save(dataset_map)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _run_static_range_qat(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quant_opts: _QuantizationOptions,
    signature_def_map: _SignatureDefMap,
) -> None:
  """Runs static-range quantization for a Quantization-Aware Trained model.

  Runs the quantization for a model trained using QAT.

  Args:
    src_saved_model_path: Path to the source SavedModel directory.
    dst_saved_model_path: Path to the destination SavedModel directory.
    quant_opts: Quantization options.
    signature_def_map: Signature def key -> SignatureDef mapping.
  """
  logging.info('Running static-range quantization for QAT model.')

  # The pywrap layer accepts serialized protos only.
  serialized_signature_defs = _serialize_signature_def_map(signature_def_map)
  pywrap_quantize_model.quantize_qat_model(
      src_saved_model_path,
      dst_saved_model_path,
      quantization_options_serialized=quant_opts.SerializeToString(),
      signature_keys=list(quant_opts.signature_keys),
      signature_def_map_serialized=serialized_signature_defs,
      py_function_library=py_function_lib.PyFunctionLibrary(),
  )
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _run_static_range_ptq(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quant_opts: _QuantizationOptions,
    representative_dataset: Mapping[str, _RepresentativeDatasetFile],
    signature_def_map: _SignatureDefMap,
) -> None:
  """Runs static-range Post-Training Quantization.

  Runs static-range PTQ for the model. Runs the calibration step with
  `representative_dataset` to collect statistics required for quantization.
  This produces the quantized GraphDef along with the SignatureDefs which
  might have been modified according to the changes in the graph.

  Args:
    src_saved_model_path: Path to the source SavedModel directory.
    dst_saved_model_path: Path to the destination SavedModel directory.
    quant_opts: Quantization options.
    representative_dataset: A map from signature key to the saved
      representative dataset file.
    signature_def_map: Signature def key -> SignatureDef mapping.

  Raises:
    ValueError if the graph doesn't contain a valid signature.
  """
  logging.info('Running static-range post-training quantization.')

  # `quantize_ptq_static_range` requires both the SignatureDefs and the
  # `RepresentativeDatasetFile`s to be serialized. Serialize the values to
  # match the expected types.
  dataset_files_serialized = {}
  for sig_key, dataset_file in representative_dataset.items():
    dataset_files_serialized[sig_key] = dataset_file.SerializeToString()

  serialized_signature_defs = _serialize_signature_def_map(signature_def_map)

  pywrap_quantize_model.quantize_ptq_static_range(
      src_saved_model_path,
      dst_saved_model_path,
      quantization_options_serialized=quant_opts.SerializeToString(),
      signature_keys=list(quant_opts.signature_keys),
      signature_def_map_serialized=serialized_signature_defs,
      py_function_library=py_function_lib.PyFunctionLibrary(),
      representative_dataset_file_map_serialized=dataset_files_serialized,
  )
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _static_range_quantize(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options: _QuantizationOptions,
    representative_dataset: Optional[
        repr_dataset.RepresentativeDatasetOrMapping
    ] = None,
) -> autotrackable.AutoTrackable:
  """Quantizes the given SavedModel via static range quantization.

  If the model is not trained with Quantization-Aware Training (QAT) technique,
  it requires `representative_dataset` to collect statistics required for
  quantization. If non-None `representative_dataset` is provided with a QAT
  model input, `representative_dataset` will be ignored.

  Args:
    src_saved_model_path: Path to the saved model. When representative_dataset
      is not provided, this should be a model trained with QAT.
    dst_saved_model_path: The path to save the output SavedModel. The directory
      will be overwritten if not empty.
    quantization_options: QuantizationOptions proto describing quantization
      related config.
    representative_dataset: a generator that returns a dictionary in {input_key:
      input_value} format or a tuple with signature key and a dictionary in
      {input_key: input_value} format that feeds calibration data for quantizing
      model. This should be provided when the model is not a QAT model.

  Returns:
    A SavedModel object with TF quantization applied.

  Raises:
    ValueError: when representative_dataset is not provided for non-QAT model.
    RuntimeError: When a MetaGraphDef could not be found associated with `tags`
      in the SavedModel.
  """
  logging.info(
      'Running static range quantization on model: %s', src_saved_model_path
  )
  logging.info('QuantizationOptions: \n%s', quantization_options)

  # QAT models and METHOD_NO_QUANTIZE skip the calibration step, so no
  # representative dataset is required for them.
  is_qat_saved_model_or_method_no_quantize = _is_qat_saved_model(
      src_saved_model_path
  ) or (
      quantization_options.quantization_method.preset_method
      == _QuantizationMethod.METHOD_NO_QUANTIZE
  )
  signature_def_map = save_model.get_signatures_from_saved_model(
      src_saved_model_path,
      quantization_options.signature_keys,
      set(quantization_options.tags),
  )

  # The dataset may come from either the argument or the options proto, but
  # not both.
  if (
      representative_dataset is not None
      and quantization_options.representative_datasets
  ):
    raise ValueError(
        'Do not specify both the `representative_dataset` argument and'
        ' the `representative_datasets` field in `QuantizationOptions`.'
    )

  saved_representative_dataset = quantization_options.representative_datasets
  if representative_dataset is not None:
    saved_representative_dataset = _save_representative_dataset(
        representative_dataset, signature_def_map
    )

  # Checks if the model is from QAT or method is METHOD_NO_QUANTIZE.
  if (
      not saved_representative_dataset
      and not is_qat_saved_model_or_method_no_quantize
  ):
    raise ValueError(
        'When `representative_dataset` is not provided, the model should be '
        'trained with quantization-aware training (QAT).'
    )
  if quantization_options.min_num_elements_for_weights > 0:
    # BUGFIX: `logging.warn` is a deprecated alias; use `logging.warning`.
    logging.warning(
        'min_num_elements_for_weights is set but is not supported for the '
        'Post-training static range quantization. '
        'The flag is ignored.'
    )

  if is_qat_saved_model_or_method_no_quantize:
    _run_static_range_qat(
        src_saved_model_path,
        dst_saved_model_path,
        quantization_options,
        signature_def_map,
    )
  else:
    _run_static_range_ptq(
        src_saved_model_path,
        dst_saved_model_path,
        quantization_options,
        saved_representative_dataset,
        signature_def_map,
    )

  return saved_model_load.load(dst_saved_model_path)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def _dynamic_range_quantize(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options: _QuantizationOptions,
) -> autotrackable.AutoTrackable:
  """Quantizes the given SavedModel via post-training dynamic range quantization.

  Args:
    src_saved_model_path: Path to the saved model.
    dst_saved_model_path: The path to save the output SavedModel. The directory
      will be overwritten if not empty.
    quantization_options: QuantizationOptions proto describing quantization
      related config.

  Returns:
    A SavedModel object with TF quantization applied.

  Raises:
    ValueError: when the model is QAT model.
  """
  mode_str = 'dynamic-range quantization'
  if _is_qat_saved_model(src_saved_model_path):
    raise ValueError(
        'The models trained with quantization-aware training (QAT) is not '
        'supported for %s.' % mode_str
    )

  logging.info(
      'Running post-training %s on model: %s', mode_str, src_saved_model_path
  )
  logging.info('QuantizationOptions: \n%s', quantization_options)

  # CONSISTENCY FIX: pass the tags as a `set` to match the sibling paths
  # (`_static_range_quantize` and `_weight_only_quantize`), which pass
  # `set(quantization_options.tags)` instead of the raw repeated field.
  signature_def_map = save_model.get_signatures_from_saved_model(
      src_saved_model_path,
      quantization_options.signature_keys,
      set(quantization_options.tags),
  )

  # Apply post-training dynamic range quantization to the model.
  pywrap_quantize_model.quantize_ptq_dynamic_range(
      src_saved_model_path,
      dst_saved_model_path,
      quantization_options_serialized=quantization_options.SerializeToString(),
      signature_keys=list(quantization_options.signature_keys),
      signature_def_map_serialized=_serialize_signature_def_map(
          signature_def_map
      ),
      py_function_library=py_function_lib.PyFunctionLibrary(),
  )

  return saved_model_load.load(dst_saved_model_path)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def _weight_only_quantize(
    src_saved_model_path: str,
    dst_saved_model_path: str,
    quantization_options: quant_opts_pb2.QuantizationOptions,
) -> autotrackable.AutoTrackable:
  """Quantizes the given SavedModel via weight-only quantization.

  Args:
    src_saved_model_path: Path to the saved model.
    dst_saved_model_path: The path to save the output SavedModel. The directory
      will be overwritten if not empty.
    quantization_options: QuantizationOptions proto describing quantization
      related config.

  Returns:
    A SavedModel object with TF quantization applied.

  Raises:
    ValueError: when the model is QAT model.
  """
  mode_str = 'weight-only quantization'

  # QAT weight-only is not supported yet.
  if _is_qat_saved_model(src_saved_model_path):
    raise ValueError(
        'The models trained with quantization-aware training (QAT) is not '
        'supported for %s.' % mode_str
    )

  logging.info(
      'Running post-training %s on model: %s', mode_str, src_saved_model_path
  )
  logging.info('QuantizationOptions: \n%s', quantization_options)

  signature_defs = save_model.get_signatures_from_saved_model(
      src_saved_model_path,
      list(quantization_options.signature_keys),
      set(quantization_options.tags),
  )

  # The pywrap layer accepts serialized protos only.
  serialized_signature_defs = _serialize_signature_def_map(signature_defs)
  pywrap_quantize_model.quantize_weight_only(
      src_saved_model_path,
      dst_saved_model_path,
      quantization_options_serialized=quantization_options.SerializeToString(),
      signature_def_map_serialized=serialized_signature_defs,
      py_function_library=py_function_lib.PyFunctionLibrary(),
  )

  return saved_model_load.load(dst_saved_model_path)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def _verify_output_dir(output_dir: Optional[str], overwrite: bool) -> None:
|
| 440 |
+
"""Verifies the output directory.
|
| 441 |
+
|
| 442 |
+
Raises an error if `output_dir` is not suitable for writing the output saved
|
| 443 |
+
model.
|
| 444 |
+
|
| 445 |
+
Args:
|
| 446 |
+
output_dir: Output directory.
|
| 447 |
+
overwrite: An option allowing to overwrite the existing output directory if
|
| 448 |
+
set to true. Does not actually create or modify the `output_dir` in this
|
| 449 |
+
function.
|
| 450 |
+
|
| 451 |
+
Raises:
|
| 452 |
+
FileExistsError: Iff `output_dir` is not empty and `overwrite` is false.
|
| 453 |
+
"""
|
| 454 |
+
dir_not_empty = (
|
| 455 |
+
output_dir is not None
|
| 456 |
+
and file_io.file_exists_v2(output_dir)
|
| 457 |
+
and file_io.list_directory_v2(output_dir)
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
if dir_not_empty and not overwrite:
|
| 461 |
+
raise FileExistsError(
|
| 462 |
+
f'Output directory already exists: {output_dir} . '
|
| 463 |
+
'Please set overwrite_output_directory to true to '
|
| 464 |
+
'overwrite the existing directory.'
|
| 465 |
+
)
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def _populate_quantization_component_spec(
    quant_method: _QuantizationMethod,
) -> None:
  """Populates default values for QuantizationComponentSpec.

  Args:
    quant_method: The quantization method to be updated.

  Raises:
    ValueError: When a user-provided component spec uses an unsupported
      precision, or when the resulting number of component specs does not
      match the preset method's requirement.
  """
  # Make sure creating one spec per component.
  updated_component_spec = dict()

  # Populate default configuration.
  if (
      quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8
      or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8
  ):
    # Full int8: activation and weight in int8, bias in int32.
    updated_component_spec[_QuantizationComponent.COMPONENT_ACTIVATION] = (
        _QuantizationComponentSpec(
            quantization_component=_QuantizationComponent.COMPONENT_ACTIVATION,
            tensor_type=_TensorType.TENSORTYPE_INT_8,
        )
    )
    updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = (
        _QuantizationComponentSpec(
            quantization_component=_QuantizationComponent.COMPONENT_WEIGHT,
            tensor_type=_TensorType.TENSORTYPE_INT_8,
        )
    )
    updated_component_spec[_QuantizationComponent.COMPONENT_BIAS] = (
        _QuantizationComponentSpec(
            quantization_component=_QuantizationComponent.COMPONENT_BIAS,
            tensor_type=_TensorType.TENSORTYPE_INT_32,
        )
    )
  elif (
      quant_method.preset_method
      == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
  ):
    # Weight-only: only the weight component is quantized.
    updated_component_spec[_QuantizationComponent.COMPONENT_WEIGHT] = (
        _QuantizationComponentSpec(
            quantization_component=_QuantizationComponent.COMPONENT_WEIGHT,
            tensor_type=_TensorType.TENSORTYPE_INT_8,
        )
    )

  # Override if quantization_component_spec is specified.
  if quant_method.quantization_component_specs:
    # Check if the component spec is supported configuration in TF-Quant.
    for component_spec in quant_method.quantization_component_specs:
      if component_spec.quantization_component in [
          _QuantizationComponent.COMPONENT_WEIGHT,
          _QuantizationComponent.COMPONENT_ACTIVATION,
      ]:
        if component_spec.tensor_type != _TensorType.TENSORTYPE_INT_8:
          raise ValueError(
              'Only int8 precision is supported for input operands.'
          )
      else:
        if component_spec.tensor_type != _TensorType.TENSORTYPE_INT_32:
          raise ValueError('Only int32 precision is supported for bias.')
      # Update with the custom spec.
      updated_component_spec[component_spec.quantization_component] = (
          component_spec
      )

  # Update the component specs.
  del quant_method.quantization_component_specs[:]
  quant_method.quantization_component_specs.extend(
      updated_component_spec.values()
  )

  # Validate the final spec count against the preset method's requirement.
  if (
      quant_method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8
      or quant_method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8
  ) and (len(quant_method.quantization_component_specs) != 3):
    # BUGFIX: the original passed two positional args to ValueError
    # (`ValueError('...', quant_method)`), which renders the message as a
    # tuple; build one formatted message instead.
    raise ValueError(
        'Exactly 3 component specs (activation, weight, bias) are required '
        f'for quantization method: {quant_method}'
    )
  elif (
      quant_method.preset_method
      == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
  ) and len(quant_method.quantization_component_specs) != 1:
    # BUGFIX: the check requires exactly one spec; the original message said
    # 'at least one', which was misleading.
    raise ValueError(
        'Exactly one component spec (weight) needs to be specified for '
        'weight-only quantization.'
    )
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def _populate_unitwise_quantization_specs(
    quantization_options: _QuantizationOptions,
) -> None:
  """Verifies and populates unitwise quantization specs."""
  unitwise_specs = quantization_options.unit_wise_quantization_specs
  if not unitwise_specs:
    return

  # Top-level component specs, sorted so they can be compared against each
  # unit-wise spec's components regardless of ordering.
  component_key = lambda spec: spec.quantization_component
  top_level_specs_sorted = sorted(
      quantization_options.quantization_method.quantization_component_specs,
      key=component_key,
  )

  for unitwise_spec in unitwise_specs:
    if not unitwise_spec.unit:
      raise ValueError(
          'UnitWiseQuantizationSpec must contain at least one unit.'
      )

    for unit in unitwise_spec.unit:
      if not unit.op_type and not unit.node_name:
        raise ValueError('Either `op_type` or `node_name` must be specified.')

    _populate_quantization_component_spec(unitwise_spec.quantization_method)

    # A unit-wise method may either be empty (NO_QUANTIZE) or identical to
    # the top-level method's component specs.
    unit_specs = unitwise_spec.quantization_method.quantization_component_specs
    if unit_specs and (
        sorted(unit_specs, key=component_key) != top_level_specs_sorted
    ):
      raise ValueError(
          'Currently unit-wise quantization spec only supports NO_QUANTIZE and'
          ' same quantization method as the top-level `quantization_method`'
      )
|
| 586 |
+
|
| 587 |
+
|
| 588 |
+
def _populate_calibration_options(
    quantization_options: quant_opts_pb2.QuantizationOptions,
):
  """Populates default values for CalibrationOptions.

  Args:
    quantization_options: An instance of QuantizationOptions with a field
      specifying CalibrationOptions
  """

  calib_opts = quantization_options.calibration_options
  if (
      calib_opts.calibration_method
      == _CalibrationMethod.CALIBRATION_METHOD_UNSPECIFIED
  ):
    # Default to min/max calibration when the user did not choose a method.
    calib_opts.calibration_method = (
        _CalibrationMethod.CALIBRATION_METHOD_MIN_MAX
    )
  elif (
      calib_opts.calibration_method
      == _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE
  ):
    # Fill in histogram-percentile defaults only for parameters the user
    # left unset (proto scalar defaults are falsy).
    if not calib_opts.calibration_parameters.num_bins:
      calib_opts.calibration_parameters.num_bins = 512
    if not calib_opts.calibration_parameters.min_percentile:
      calib_opts.calibration_parameters.min_percentile = 0.001
    if not calib_opts.calibration_parameters.max_percentile:
      calib_opts.calibration_parameters.max_percentile = 99.999
  # Check the activation_tensor_type of HISTOGRAM_MSE methods.
  elif calib_opts.calibration_method in [
      _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE,
      _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY,
      _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC,
  ]:
    # NOTE(review): this indexes the repeated `quantization_component_specs`
    # field by the enum value COMPONENT_ACTIVATION, i.e. by position —
    # presumably the specs are ordered so the activation spec sits at that
    # index after population; verify against
    # `_populate_quantization_component_spec`.
    activation_tensor_type = (
        quantization_options.quantization_method.quantization_component_specs[
            _QuantizationComponent.COMPONENT_ACTIVATION
        ].tensor_type
    )
    # Unlike the HISTOGRAM_PERCENTILE method, the HISTOGRAM_MSE method uses
    # num_bits because it actually quantizes and dequantizes values.
    if activation_tensor_type != _TensorType.TENSORTYPE_INT_8:
      raise ValueError(
          'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration'
          f' methods. calibration_method={calib_opts.calibration_method}'
      )

    if not calib_opts.calibration_parameters.num_bins:
      calib_opts.calibration_parameters.num_bins = 512

  if calib_opts.calibration_data_dir:
    # Prepare the directory where calibration statistics will be written;
    # `force_regenerate_calibration_data` controls overwriting.
    save_model.create_empty_output_dir(
        calib_opts.calibration_data_dir,
        overwrite=calib_opts.force_regenerate_calibration_data,
    )
|
| 643 |
+
|
| 644 |
+
|
| 645 |
+
def _populate_quantization_options_default_values(
    quantization_options: _QuantizationOptions,
) -> None:
  """Populates default values for QuantizationOptions.

  Populates unspecified or unset fields of QuantizationOptions with the default
  values.

  * If `op_set` is unspecified, it defaults to `OpSet.XLA`.
  * If `freeze_all_variables` is not set, it defaults to `True`.
  * Check if configurations are set correctly:
    - Per-channel quantization is supported for Uniform Quantized opset only.

  Args:
    quantization_options: An instance of QuantizationOptions.
  """
  if quantization_options.op_set == quant_opts_pb2.OpSet.OP_SET_UNSPECIFIED:
    quantization_options.op_set = quant_opts_pb2.OpSet.XLA

  if not quantization_options.tags:
    quantization_options.tags.append(tag_constants.SERVING)

  if not quantization_options.signature_keys:
    quantization_options.signature_keys.append(
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    )

  if not quantization_options.HasField('freeze_all_variables'):
    quantization_options.freeze_all_variables = True

  if quantization_options.enable_legacy_weight_only:
    raise ValueError(
        'Legacy weight-only is deprecated. Use weight-only quantization method.'
    )

  # Converter assumes options are specified. So set SRQ explicitly.
  if (
      quantization_options.quantization_method.preset_method
      == _PresetMethod.METHOD_UNSPECIFIED
  ):
    # NOTE: fixed missing space between the two concatenated message parts.
    logging.debug(
        '"preset_method" for QuantizationMethod is not specified.'
        ' Static range quantization is used by default.'
    )
    quantization_options.quantization_method.preset_method = (
        _PresetMethod.METHOD_STATIC_RANGE_INT8
    )

  # Check default quantization option values for weight-only quantization.
  # TODO(b/242805842): Find good minimum_elements_for_weights number for server.
  # please also update default value in tflite converter:
  # tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc;l=201
  if quantization_options.min_num_elements_for_weights == 0:
    quantization_options.min_num_elements_for_weights = (
        _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS
    )
    logging.warning(
        (
            'QuantizationOptions.min_num_elements_for_weights is not set (0).'
            ' Setting to the default value: %d.'
        ),
        _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS,
    )

  if not quantization_options.HasField('enable_per_channel_quantization'):
    quantization_options.enable_per_channel_quantization = False

  # Per-channel quantization is only valid for a restricted set of
  # opset/method combinations; reject the rest early.
  if quantization_options.enable_per_channel_quantization and not (
      (
          quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED
          or quantization_options.quantization_method.preset_method
          == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
      )
      or (
          quantization_options.op_set
          in (quant_opts_pb2.OpSet.XLA, quant_opts_pb2.OpSet.STABLEHLO)
          and quantization_options.quantization_method.preset_method
          == _PresetMethod.METHOD_STATIC_RANGE_INT8
      )
  ):
    raise ValueError(
        'Currently, per-channel quantization is supported for Uniform Quantized'
        ' opset, weight only quantization, or XLA/StableHLO opset with static'
        ' range quantization.'
    )

  if (
      quantization_options.quantization_method.preset_method
      == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
      and (
          quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED
          or quantization_options.op_set == quant_opts_pb2.OpSet.TF
      )
  ):
    raise ValueError('TF/Uniform quantized opset does not support weight-only.')

  if (quantization_options.op_set == quant_opts_pb2.OpSet.STABLEHLO) and (
      quantization_options.quantization_method.preset_method
      != _PresetMethod.METHOD_STATIC_RANGE_INT8
      and quantization_options.quantization_method.preset_method
      != _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
  ):
    # NOTE: fixed message typo 'quantizationvia' -> 'quantization via'.
    raise ValueError(
        'StableHLO quantized opset currently only supports static range'
        ' quantization and weight-only quantization via TF Quantizer.'
    )

  # Set `force_graph_mode_calibration` to True to avoid skipping op execution,
  # which are not connected to return ops, during calibration execution.
  # TODO: b/335031954 - Bring back support to run calibration in Eager mode.
  logging.debug(
      'Setting `force_graph_mode_calibration = True` to ensure the calibration'
      ' mode is executed properly.'
  )
  quantization_options.force_graph_mode_calibration = True

  if quantization_options.HasField('debugger_config'):
    if not quantization_options.debugger_config.log_dir_path:
      quantization_options.debugger_config.log_dir_path = '/tmp/dumps'

    if (
        quantization_options.debugger_config.debugger_type
        == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED
    ):
      raise ValueError(
          'Debugger is enabled but debugger type was not specified.'
      )

    if (
        quantization_options.debugger_config.debugger_type
        == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL
        and not quantization_options.debugger_config.unquantized_dump_model_path
    ):
      raise ValueError(
          'Debugger type whole model verify was used but'
          ' unquantized_dump_model_path was not specified.'
      )

  # Check and populate quantization component spec.
  _populate_quantization_component_spec(
      quantization_options.quantization_method
  )
  # Verify and populate unit-wise quantization specs.
  _populate_unitwise_quantization_specs(quantization_options)

  if (
      quantization_options.quantization_method.preset_method
      == _PresetMethod.METHOD_STATIC_RANGE_INT8
  ):
    # Check and populate calibration options.
    _populate_calibration_options(quantization_options)
| 797 |
+
|
| 798 |
+
@tf_export.tf_export('quantization.experimental.quantize_saved_model')
def quantize(
    saved_model_path: str,
    output_directory: Optional[str] = None,
    quantization_options: Optional[_QuantizationOptions] = None,
    representative_dataset: Optional[
        repr_dataset.RepresentativeDatasetOrMapping
    ] = None,
    *,
    overwrite_output_directory: bool = False,
) -> autotrackable.AutoTrackable:
  """Quantizes the SavedModel with the given quantization options.

  Example usage:
  ```python
  # Quantizing a model trained with QAT.
  quantization_options = tf.quantization.experimental.QuantizationOptions(
      signature_keys=['your_signature_key'],
  )
  tf.quantization.experimental.quantize_saved_model(
      '/tmp/input_model',
      '/tmp/output_model',
      quantization_options=quantization_options,
  )

  # When quantizing a model trained without QAT (Post-Training Quantization),
  # a representative dataset is required.
  representative_dataset = [{"input": tf.random.uniform(shape=(3, 3))}
                        for _ in range(256)]
  tf.quantization.experimental.quantize_saved_model(
      '/tmp/input_model',
      '/tmp/output_model',
      quantization_options=quantization_options,
      representative_dataset={'your_signature_key': representative_dataset},
    )

  # In addition to preset quantization methods, fine-grained control of
  # quantization for each component is also supported.
  _QuantizationComponentSpec = (
      tf.quantization.experimental.QuantizationComponentSpec
  )
  quantization_options = tf.quantization.experimental.QuantizationOptions(
      signature_keys=['your_signature_key'],
      quantization_method=tf.quantization.experimental.QuantizationMethod(
          quantization_component_specs=[
              _QuantizationComponentSpec(
                  quantization_component=(
                      _QuantizationComponentSpec.COMPONENT_ACTIVATION
                  ),
                  tensor_type=_QuantizationComponentSpec.TENSORTYPE_INT_8,
              )
          ]
      )
  )
  tf.quantization.experimental.quantize_saved_model(
      '/tmp/input_model',
      '/tmp/output_model',
      quantization_options=quantization_options,
  )
  ```

  Args:
    saved_model_path: Path to the saved model. When representative_dataset is
      not provided, this should be a model trained with QAT.
    output_directory: The path to save the output SavedModel. Set
      `overwrite_output_directory` to `True` to overwrite any existing contents
      in the directory if not empty.
    quantization_options: A set of options for quantization. If None, it uses
      post-training static range quantization with XLA opset by default.
    representative_dataset: an iterator that returns a dictionary of {input_key:
      input_value} or a map from signature key to a dictionary of {input_key:
      input_value} that feeds calibration data for quantizing model. The
      representative should be provided when the model is a PTQ model. It can be
      provided either via this parameter or via the `representative_datasets`
      field in `QuantizationOptions`.
    overwrite_output_directory: If set to true, overwrites the output directory
      iff it isn't empty. The default value is false.

  Returns:
    A SavedModel object with TF quantization applied, or None if no quantization
    is performed.

  Raises:
    ValueError: When 1) representative_dataset is not provided for non QAT model
      for enabling static range quantization, 2) invalid value is provided as
      a quantization method, or 3) provide representative dataset via both
      argument and QuantizationOptions.
    ValueError: When the specified quantization method is not yet supported.
  """
  _verify_output_dir(output_directory, overwrite_output_directory)

  # Set default values for None arguments.
  if output_directory is None:
    output_directory = tempfile.mkdtemp()

  if quantization_options is None:
    quantization_options = _QuantizationOptions()

  _populate_quantization_options_default_values(quantization_options)

  # Dispatch to the quantization routine matching the (possibly defaulted)
  # preset method.
  method: _QuantizationMethod = quantization_options.quantization_method
  if (
      method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_INT8
      or method.preset_method == _PresetMethod.METHOD_NO_QUANTIZE
  ):
    return _static_range_quantize(
        saved_model_path,
        output_directory,
        quantization_options,
        representative_dataset,
    )
  elif method.preset_method == _PresetMethod.METHOD_DYNAMIC_RANGE_INT8:
    return _dynamic_range_quantize(
        saved_model_path,
        output_directory,
        quantization_options,
    )
  elif (
      method.preset_method == _PresetMethod.METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8
  ):
    return _weight_only_quantize(
        saved_model_path,
        output_directory,
        quantization_options,
    )
  else:
    # BUG FIX: the message was a plain string, so '{method.preset_method}'
    # was emitted literally. Made it an f-string.
    raise ValueError(
        f'Quantization method {method.preset_method} is not supported.'
    )
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py
ADDED
|
@@ -0,0 +1,402 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Defines types required for representative datasets for quantization."""
|
| 16 |
+
|
| 17 |
+
from collections.abc import Collection, Sized
|
| 18 |
+
import os
|
| 19 |
+
from typing import Iterable, Mapping, Optional, Union
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
from tensorflow.compiler.mlir.quantization.tensorflow import quantization_options_pb2
|
| 24 |
+
from tensorflow.core.protobuf import meta_graph_pb2
|
| 25 |
+
from tensorflow.python.client import session
|
| 26 |
+
from tensorflow.python.data.ops import readers
|
| 27 |
+
from tensorflow.python.eager import context
|
| 28 |
+
from tensorflow.python.framework import tensor_util
|
| 29 |
+
from tensorflow.python.lib.io import python_io
|
| 30 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 31 |
+
from tensorflow.python.types import core
|
| 32 |
+
from tensorflow.python.util import tf_export
|
| 33 |
+
|
| 34 |
+
# A representative sample is a map of: input_key -> input_value.
# Ex.: {'dense_input': tf.constant([1, 2, 3])}
# Ex.: {'x1': np.ndarray([4, 5, 6])}
RepresentativeSample = Mapping[str, core.TensorLike]

# A representative dataset is an iterable of representative samples.
RepresentativeDataset = Iterable[RepresentativeSample]

# A type representing a map from: signature key -> representative dataset.
# Ex.: {'serving_default': [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])],
#       'other_signature_key': [tf.constant([[2, 2], [9, 9]])]}
RepresentativeDatasetMapping = Mapping[str, RepresentativeDataset]

# A type alias expressing that it can be either a RepresentativeDataset or
# a mapping of signature key to RepresentativeDataset.
RepresentativeDatasetOrMapping = Union[
    RepresentativeDataset, RepresentativeDatasetMapping
]

# Type aliases for quantization_options_pb2 messages.
_RepresentativeDataSample = quantization_options_pb2.RepresentativeDataSample
_RepresentativeDatasetFile = quantization_options_pb2.RepresentativeDatasetFile
| 57 |
+
|
| 58 |
+
class RepresentativeDatasetSaver:
  """Base interface for saving representative datasets to files.

  Subclasses implement `save`, which persists a signature-def-key ->
  representative-dataset mapping to the file system. This is useful for
  keeping a snapshot of a representative dataset, or for passing a
  representative dataset around as files.
  """

  def save(
      self, representative_dataset: RepresentativeDatasetMapping
  ) -> Mapping[str, _RepresentativeDatasetFile]:
    """Saves the representative dataset.

    Args:
      representative_dataset: RepresentativeDatasetMapping which is a
        signature_def_key -> representative dataset mapping.
    """
    raise NotImplementedError('Method "save" is not implemented.')
+
|
| 80 |
+
|
| 81 |
+
@tf_export.tf_export(
    'quantization.experimental.TfRecordRepresentativeDatasetSaver'
)
class TfRecordRepresentativeDatasetSaver(RepresentativeDatasetSaver):
  """Representative dataset saver in TFRecord format.

  Saves representative datasets for quantization calibration in TFRecord format.
  The samples are serialized as `RepresentativeDataSample`.

  The `save` method return a signature key to `RepresentativeDatasetFile` map,
  which can be used for QuantizationOptions.

  Example usage:

  ```python
  # Creating the representative dataset.
  representative_dataset = [{"input": tf.random.uniform(shape=(3, 3))}
                        for _ in range(256)]

  # Saving to a TFRecord file.
  dataset_file_map = (
    tf.quantization.experimental.TfRecordRepresentativeDatasetSaver(
        path_map={'serving_default': '/tmp/representative_dataset_path'}
    ).save({'serving_default': representative_dataset})
  )

  # Using in QuantizationOptions.
  quantization_options = tf.quantization.experimental.QuantizationOptions(
      signature_keys=['serving_default'],
      representative_datasets=dataset_file_map,
  )
  tf.quantization.experimental.quantize_saved_model(
      '/tmp/input_model',
      '/tmp/output_model',
      quantization_options=quantization_options,
  )
  ```
  """

  def __init__(
      self,
      path_map: Mapping[str, os.PathLike[str]],
      expected_input_key_map: Optional[Mapping[str, Collection[str]]] = None,
  ):
    """Initializes TFRecord representative dataset saver.

    Args:
      path_map: Signature def key -> path mapping. Each path is a TFRecord file
        to which a `RepresentativeDataset` is saved. The signature def keys
        should be a subset of the `SignatureDef` keys of the
        `representative_dataset` argument of the `save()` call.
      expected_input_key_map: Signature def key -> expected input keys. If set,
        validate that the sample has same set of input keys before saving.

    Raises:
      KeyError: If path_map and expected_input_key_map have different keys.
    """
    self.path_map: Mapping[str, os.PathLike[str]] = path_map
    self.expected_input_key_map: Mapping[str, Collection[str]] = {}
    if expected_input_key_map is not None:
      if set(path_map.keys()) != set(expected_input_key_map.keys()):
        raise KeyError(
            'The `path_map` and `expected_input_key_map` should have the same'
            ' set of keys.'
        )

      self.expected_input_key_map = expected_input_key_map

  def _save_tf_record_dataset(
      self,
      repr_ds: RepresentativeDataset,
      signature_def_key: str,
  ) -> _RepresentativeDatasetFile:
    """Saves `repr_ds` to a TFRecord file.

    Each sample in `repr_ds` is serialized as `RepresentativeDataSample`.

    Args:
      repr_ds: `RepresentativeDataset` to save.
      signature_def_key: The signature def key associated with `repr_ds`.

    Returns:
      a RepresentativeDatasetFile instance contains the path to the saved file.

    Raises:
      KeyError: If the set of input keys in the dataset samples doesn't match
      the set of expected input keys.
    """
    # When running in graph mode (TF1), tf.Tensor types should be converted to
    # numpy ndarray types to be compatible with `make_tensor_proto`.
    if not context.executing_eagerly():
      with session.Session() as sess:
        repr_ds = replace_tensors_by_numpy_ndarrays(repr_ds, sess)

    expected_input_keys = self.expected_input_key_map.get(
        signature_def_key, None
    )
    if expected_input_keys is not None:
      # BUG FIX: normalize to a set once. The previous code compared
      # `set(sample.keys()) != expected_input_keys` directly, which is always
      # unequal when the caller supplies a list/tuple `Collection`, raising a
      # spurious KeyError for perfectly valid samples.
      expected_input_keys = set(expected_input_keys)
    tfrecord_file_path = self.path_map[signature_def_key]
    with python_io.TFRecordWriter(tfrecord_file_path) as writer:
      for repr_sample in repr_ds:
        if (
            expected_input_keys is not None
            and set(repr_sample.keys()) != expected_input_keys
        ):
          raise KeyError(
              'Invalid input keys for representative sample. The function'
              f' expects input keys of: {set(expected_input_keys)}. Got:'
              f' {set(repr_sample.keys())}. Please provide correct input keys'
              ' for representative samples.'
          )

        sample = _RepresentativeDataSample()
        for input_name, input_value in repr_sample.items():
          sample.tensor_proto_inputs[input_name].CopyFrom(
              tensor_util.make_tensor_proto(input_value)
          )

        writer.write(sample.SerializeToString())

    logging.info(
        'Saved representative dataset for signature def: %s to: %s',
        signature_def_key,
        tfrecord_file_path,
    )
    return _RepresentativeDatasetFile(
        tfrecord_file_path=str(tfrecord_file_path)
    )

  def save(
      self, representative_dataset: RepresentativeDatasetMapping
  ) -> Mapping[str, _RepresentativeDatasetFile]:
    """Saves the representative dataset.

    Args:
      representative_dataset: Signature def key -> representative dataset
        mapping. Each dataset is saved in a separate TFRecord file whose path
        matches the signature def key of `path_map`.

    Raises:
      ValueError: When the signature def key in `representative_dataset` is not
      present in the `path_map`.

    Returns:
      A map from signature key to the RepresentativeDatasetFile instance
      contains the path to the saved file.
    """
    dataset_file_map = {}
    for signature_def_key, repr_ds in representative_dataset.items():
      if signature_def_key not in self.path_map:
        raise ValueError(
            'SignatureDef key does not exist in the provided path_map:'
            f' {signature_def_key}'
        )

      dataset_file_map[signature_def_key] = self._save_tf_record_dataset(
          repr_ds, signature_def_key
      )
    return dataset_file_map
| 239 |
+
|
| 240 |
+
|
| 241 |
+
class RepresentativeDatasetLoader:
  """Base interface for loading representative datasets from files.

  Subclasses implement `load`, which reads the datasets back as a
  signature def key -> representative dataset mapping.
  """

  def load(self) -> RepresentativeDatasetMapping:
    """Loads the representative datasets.

    Returns:
      representative dataset mapping: A loaded signature def key ->
      representative mapping.
    """
    raise NotImplementedError('Method "load" is not implemented.')
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class TfRecordRepresentativeDatasetLoader(RepresentativeDatasetLoader):
  """TFRecord representative dataset loader.

  Loads representative dataset stored in TFRecord files.
  """

  def __init__(
      self,
      dataset_file_map: Mapping[str, _RepresentativeDatasetFile],
  ) -> None:
    """Initializes TFRecord representative dataset loader.

    Args:
      dataset_file_map: Signature key -> `RepresentativeDatasetFile` mapping.

    Raises:
      DecodeError: If the sample is not RepresentativeDataSample.
    """
    self.dataset_file_map = dataset_file_map

  def _load_tf_record(self, tf_record_path: str) -> RepresentativeDataset:
    """Loads TFRecord containing samples of type`RepresentativeDataSample`."""
    loaded_samples = []
    # Reading TFRecords requires eager execution, regardless of the caller's
    # mode.
    with context.eager_mode():
      for record_bytes in readers.TFRecordDatasetV2(
          filenames=[tf_record_path]
      ):
        data_sample = _RepresentativeDataSample.FromString(
            record_bytes.numpy()
        )
        # Convert each serialized TensorProto back into a numpy ndarray.
        loaded_samples.append({
            input_key: tensor_util.MakeNdarray(tensor_proto)
            for input_key, tensor_proto in (
                data_sample.tensor_proto_inputs.items()
            )
        })
    return loaded_samples

  def load(self) -> RepresentativeDatasetMapping:
    """Loads the representative datasets.

    Returns:
      representative dataset mapping: A signature def key -> representative
      mapping. The loader loads `RepresentativeDataset` for each path in
      `self.dataset_file_map` and associates the loaded dataset to the
      corresponding signature def key.
    """
    dataset_map = {}
    for sig_key, dataset_file in self.dataset_file_map.items():
      # Only the TFRecord file representation is currently supported.
      if not dataset_file.HasField('tfrecord_file_path'):
        raise ValueError('Unsupported Representative Dataset filetype')
      dataset_map[sig_key] = self._load_tf_record(
          dataset_file.tfrecord_file_path
      )

    return dataset_map
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def replace_tensors_by_numpy_ndarrays(
    repr_ds: RepresentativeDataset, sess: session.Session
) -> RepresentativeDataset:
  """Replaces tf.Tensors in samples by their evaluated numpy arrays.

  Note: This should be run in graph mode (default in TF1) only.

  Args:
    repr_ds: Representative dataset to replace the tf.Tensors with their
      evaluated values. `repr_ds` is iterated through, so it may not be reusable
      (e.g. if it is a generator object).
    sess: Session instance used to evaluate tf.Tensors.

  Returns:
    The new representative dataset where each tf.Tensor is replaced by its
    evaluated numpy ndarrays.
  """

  def _materialize(value):
    # Evaluate tf.Tensors in the given session; leave other values untouched.
    if isinstance(value, core.Tensor):
      return value.eval(session=sess)
    return value

  return [
      {input_key: _materialize(input_data) for input_key, input_data in
       sample.items()}
      for sample in repr_ds
  ]
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def get_num_samples(repr_ds: RepresentativeDataset) -> Optional[int]:
  """Returns the number of samples if known.

  Args:
    repr_ds: Representative dataset.

  Returns:
    Returns the total number of samples in `repr_ds` if it can be determined
    without iterating the entier dataset. Returns None iff otherwise. When it
    returns None it does not mean the representative dataset is infinite or it
    is malformed; it simply means the size cannot be determined without
    iterating the whole dataset.
  """
  # Non-sized iterables (e.g. generators) have no cheap length.
  if not isinstance(repr_ds, Sized):
    return None

  try:
    return len(repr_ds)
  except Exception as ex:  # pylint: disable=broad-except
    # Some __len__() implementations raise; treat the size as unknown then.
    logging.info('Cannot determine the size of the dataset (%s).', ex)
    return None
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def create_feed_dict_from_input_data(
    input_data: RepresentativeSample,
    signature_def: meta_graph_pb2.SignatureDef,
) -> Mapping[str, np.ndarray]:
  """Constructs a feed_dict from input data.

  Note: This function should only be used in graph mode.

  This is a helper function that converts an 'input key -> input value' mapping
  to a feed dict. A feed dict is an 'input tensor name -> input value' mapping
  and can be directly passed to the `feed_dict` argument of `sess.run()`.

  Args:
    input_data: Input key -> input value mapping. The input keys should match
      the input keys of `signature_def`.
    signature_def: A SignatureDef representing the function that `input_data`
      is an input to.

  Returns:
    Feed dict, which is intended to be used as input for `sess.run`. It is
    essentially a mapping: input tensor name -> input value. Note that the
    input value in the feed dict is not a `Tensor`.
  """

  def _materialize(input_value):
    # Tensors cannot appear in a feed dict; evaluate them to get the
    # concrete value (requires the default session in graph mode).
    if isinstance(input_value, core.Tensor):
      return input_value.eval()
    return input_value

  return {
      signature_def.inputs[input_key].name: _materialize(input_value)
      for input_key, input_value in input_data.items()
  }
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/python/save_model.py
ADDED
|
@@ -0,0 +1,346 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Defines utilities involving SavedModel."""
|
| 16 |
+
from typing import Collection, Dict, Mapping, Optional, Sequence
|
| 17 |
+
|
| 18 |
+
from absl import logging
|
| 19 |
+
|
| 20 |
+
# pylint: disable=g-importing-member
|
| 21 |
+
from google.protobuf.any_pb2 import Any
|
| 22 |
+
# pylint: enable=g-importing-member
|
| 23 |
+
from tensorflow.core.framework import graph_pb2
|
| 24 |
+
from tensorflow.core.protobuf import meta_graph_pb2
|
| 25 |
+
from tensorflow.core.protobuf import saver_pb2
|
| 26 |
+
from tensorflow.python.client import session
|
| 27 |
+
from tensorflow.python.framework import importer
|
| 28 |
+
from tensorflow.python.framework import ops
|
| 29 |
+
from tensorflow.python.lib.io import file_io
|
| 30 |
+
from tensorflow.python.saved_model import builder
|
| 31 |
+
from tensorflow.python.saved_model import constants as saved_model_constants
|
| 32 |
+
from tensorflow.python.saved_model import loader_impl as saved_model_loader
|
| 33 |
+
from tensorflow.python.saved_model import tag_constants
|
| 34 |
+
from tensorflow.python.training import saver
|
| 35 |
+
|
| 36 |
+
# Mapping of signature def key -> SignatureDef.
|
| 37 |
+
_SignatureDefMap = Mapping[str, meta_graph_pb2.SignatureDef]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def get_signatures_from_saved_model(
    saved_model_path: str,
    signature_keys: Optional[Sequence[str]] = None,
    tags: Optional[Collection[str]] = None,
) -> Dict[str, meta_graph_pb2.SignatureDef]:
  """Gets a map from signature keys to their SignatureDef.

  Args:
    saved_model_path: Path to the saved model.
    signature_keys: List of keys identifying SignatureDef to retrieve. If None,
      retrieve all except the init signature.
    tags: Set of tags identifying the MetaGraphDef within the SavedModel.

  Returns:
    A map from signature_key to its SignatureDef.
  """
  if tags is None:
    tags = {tag_constants.SERVING}

  loader = saved_model_loader.SavedModelLoader(saved_model_path)
  meta_graphdef = loader.get_meta_graph_def_from_tags(tags)

  def _is_requested(key: str) -> bool:
    # The init signature is internal and is never returned to the caller.
    if key == saved_model_constants.INIT_OP_SIGNATURE_KEY:
      return False
    return signature_keys is None or key in signature_keys

  return {
      key: signature_def
      for key, signature_def in meta_graphdef.signature_def.items()
      if _is_requested(key)
  }
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _restore_output_tensor_names(
    graph_def: graph_pb2.GraphDef,
) -> graph_pb2.GraphDef:
  """Restores the output tensor names of the converted model.

  During the conversion, the output tensor names of the original model are
  embedded in the `tf_saved_model.index_path` attribute of the RetVal nodes and
  might become the name of Retval nodes as well (with an index suffix if there
  are multiple output tensors from one node). Since Retval nodes are not used
  in SavedModel, this function removes them and restores the names to the
  actual output tensors.

  Args:
    graph_def: the converted GraphDef. Mutated in place.

  Returns:
    The GraphDef with Retval nodes removed and output tensor names restored.
  """
  # Maps: name of the node feeding a _Retval -> the name it should be renamed
  # to (either the Retval node's own name or its index_path name).
  output_renaming_map = {}
  # A throwaway session/graph is used only to resolve node names and
  # attributes with the tf.Graph API; nothing is executed.
  with session.Session(graph=ops.Graph()):
    importer.import_graph_def(graph_def, name='')
    graph = ops.get_default_graph()
    for op in graph.get_operations():
      if op.type == '_Retval':
        expected_node_name = op.name
        if op.get_attr('tf_saved_model.index_path') is not None:
          index_path_name = op.get_attr('tf_saved_model.index_path')[0]
          # Strip a trailing ':<output index>' suffix, keeping the node name.
          index_path_name = index_path_name.decode('utf-8').split(':')[0]
          try:
            # Only use the index_path name if it points to a Retval node.
            index_path_node = graph.get_operation_by_name(index_path_name)
            if index_path_node.type == '_Retval':
              expected_node_name = index_path_name
          except KeyError:
            # No node with that name exists; keep the Retval's own name.
            pass
        retval_input_node_name = op.inputs[0].op.name
        output_renaming_map[retval_input_node_name] = expected_node_name

  # Iterate in reverse so removing a node does not skip the next element.
  for node in reversed(graph_def.node):
    if node.name in output_renaming_map:
      node.name = output_renaming_map[node.name]
    elif node.op == '_Retval':
      graph_def.node.remove(node)
    else:
      # Update the inputs referring to the pre-renaming node.
      for idx, input_name in enumerate(node.input):
        if input_name in output_renaming_map:
          node.input[idx] = output_renaming_map[input_name]
      # Update the control inputs (prefixed with '^') referring to the
      # pre-renaming node: collect then remove in reverse, re-append renamed.
      updating_inputs = []
      for input_name in reversed(node.input):
        if input_name.startswith('^') and input_name[1:] in output_renaming_map:
          updating_inputs.append(input_name[1:])
          node.input.remove(input_name)
      for updating_input in updating_inputs:
        node.input.append('^' + output_renaming_map[updating_input])
  return graph_def
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def create_empty_output_dir(
    output_directory: str, overwrite: bool = True
) -> None:
  """Creates the `output_directory`.

  If `output_directory` already exists and `overwrite` is True, it recursively
  deletes all contents inside the directory.

  Also creates the parent & intermediate directories.

  Args:
    output_directory: Output directory.
    overwrite: Whether to clean the output directory if it exists.
  """
  if overwrite and file_io.file_exists_v2(output_directory):
    logging.info(
        'Deleting existing output directory: %s .',
        output_directory,
    )
    file_io.delete_recursively_v2(output_directory)

  file_io.recursive_create_dir_v2(output_directory)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _validate_signatures(
    signature_def_map: _SignatureDefMap, exported_graph: ops.Graph
) -> _SignatureDefMap:
  """Validates if the tensor names in signatures are consistent with the graph.

  This function checks if the input and output tensor names in the signatures
  exist in the graph. The output tensor names might change during conversion;
  we try to fix that with `_restore_output_tensor_names`. Besides, if there
  are duplicated tensor names, they will be prefixed with the signature name.
  However, if that doesn't work the signatures can't be used with the converted
  graph.

  Args:
    signature_def_map: the signatures to validate. Tensor names may be updated
      in place (prefixed with their signature key).
    exported_graph: The PTQ-exported graph.

  Returns:
    The signatures with tensor names prefixed with signature name if necessary.

  Raises:
    ValueError: Iff the signatures are not consistent with the graph.
  """

  def _resolve_tensor_name(tensor_info, signature_key: str, kind: str) -> None:
    """Checks `tensor_info.name` exists in the graph, fixing it if possible.

    Falls back to the name prefixed with the signature key, updating
    `tensor_info` in place. `kind` ('input' or 'output') is used only for the
    error message.

    Raises:
      ValueError: If neither the original nor the prefixed name is found.
    """
    try:
      exported_graph.get_tensor_by_name(tensor_info.name)
    except KeyError as exc:
      try:
        prefixed_name = signature_key + '_' + tensor_info.name
        exported_graph.get_tensor_by_name(prefixed_name)
        tensor_info.name = prefixed_name
      except KeyError:
        raise ValueError(
            'Cannot find the %s tensor with name %s in the graph.'
            % (kind, tensor_info.name)
        ) from exc

  # The validation logic is identical for inputs and outputs; only the error
  # message differs, so both loops share the helper above.
  for signature_key, signature_def in signature_def_map.items():
    for tensor_info in signature_def.inputs.values():
      _resolve_tensor_name(tensor_info, signature_key, 'input')
    for tensor_info in signature_def.outputs.values():
      _resolve_tensor_name(tensor_info, signature_key, 'output')

  return signature_def_map
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def _find_op(
    graph: ops.Graph, op_name: Optional[str]
) -> Optional[ops.Operation]:
  """Finds the operation with `op_name`.

  Args:
    graph: The graph to find from.
    op_name: Name of the node.

  Returns:
    The operation that corresponds to `op_name`. Returns None iff op_name is an
    empty string or None.

  Raises:
    ValueError: `op_name` is malformed.
  """
  if op_name:
    found_op = graph.get_operation_by_name(op_name)
    logging.debug('Op found in the graph: %s', op_name)
    return found_op
  # Both None and the empty string mean "no op requested".
  return None
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _save_function_alias(
    saved_model_dir: str,
    tags: Collection[str],
    function_aliases: Mapping[str, str],
) -> None:
  """Saves the function alias to the SavedModel.

  SavedModelBuilder (TF1 saved model saver) does not support saving function
  aliases, so this function loads the SavedModel proto and adds the
  `function_aliases` field.

  Args:
    saved_model_dir: Path to the saved model directory.
    tags: A collection of tags to specify the meta graph.
    function_aliases: Function name -> function alias mapping.
  """
  loader = saved_model_loader.SavedModelLoader(saved_model_dir)
  meta_graph_def = loader.get_meta_graph_def_from_tags(tags)

  # Record every alias on the selected meta graph's function_aliases map.
  alias_map = meta_graph_def.meta_info_def.function_aliases
  for function_name, function_alias in function_aliases.items():
    alias_map[function_name] = function_alias

  saved_model_proto_serialized = loader.saved_model.SerializeToString()

  # TODO(b/266015731): Also update and set the SavedModel fingerprint.
  pb_file_path = file_io.join(
      saved_model_dir, saved_model_constants.SAVED_MODEL_FILENAME_PB
  )
  file_io.atomic_write_string_to_file(
      pb_file_path, saved_model_proto_serialized
  )
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def save_model_v1(
    graph_def: graph_pb2.GraphDef,
    output_dir: str,
    signature_def_map: _SignatureDefMap,
    tags: Collection[str],
    init_op_name: Optional[str] = None,
    saver_def: Optional[saver_pb2.SaverDef] = None,
    checkpoint_dir: Optional[str] = None,
    function_aliases: Optional[Mapping[str, str]] = None,
    asset_file_defs: Sequence[meta_graph_pb2.AssetFileDef] = (),
) -> None:
  """Saves the model.

  Saves the provided graph def as SavedModel.
  Uses TF1 SavedModel semantics (i.e. no object graph).

  Args:
    graph_def: Graph to save.
    output_dir: Output directory for the SavedModel.
    signature_def_map: Mapping of signature def key -> SignatureDef.
    tags: Tags for the meta graph def.
    init_op_name: Name of the node for initialization.
    saver_def: `saver_pb2.SaverDef` to create a `saver.Saver` from. The created
      saver will be used to save and load variables. This may be `None` if no
      variables exist in the graph.
    checkpoint_dir: Path to checkpoint file where variable values are saved.
    function_aliases: Function name -> function alias mapping.
    asset_file_defs: `AssetFileDef`s that associates the asset files and the
      name of the tensors to which the asset file names should be fed. The
      caller should make sure the asset files exist in the output saved model
      directory.

  Raises:
    ValueError iff the graph does not contain a valid signature or the file
    prefix tensor is not found in the graph.
  """
  # Start from a clean output directory (existing contents are deleted).
  create_empty_output_dir(output_dir)
  v1_builder = builder.SavedModelBuilder(output_dir)

  # Strip _Retval nodes and restore the original output tensor names so the
  # signatures can reference them (see `_restore_output_tensor_names`).
  graph_def = _restore_output_tensor_names(graph_def)
  with session.Session(graph=ops.Graph()) as sess:
    importer.import_graph_def(graph_def, name='')

    # May prefix tensor names with the signature key; raises ValueError if a
    # signature tensor cannot be found in the imported graph.
    signature_def_map = _validate_signatures(
        signature_def_map, ops.get_default_graph()
    )

    # Add `AssetFileDef`s to the collection so that correct values are fed to
    # the tensors that accept asset file paths.
    for asset_file_def in asset_file_defs:
      asset_any_proto = Any()
      asset_any_proto.Pack(asset_file_def)
      ops.add_to_collection(
          saved_model_constants.ASSETS_KEY,
          asset_any_proto,
      )

    model_saver = None
    # If `saver_def` is not None, it means there are variables in the graph.
    if saver_def:
      model_saver = saver.Saver(saver_def=saver_def)
      logging.info('Saver created with SaverDef: %s', saver_def)

      # Variables should be restored once before exporting as saved model
      # because the variables are not initialized when the GraphDef was
      # imported.
      model_saver.restore(sess, checkpoint_dir)

    v1_builder.add_meta_graph_and_variables(
        sess,
        tags,
        signature_def_map=signature_def_map,
        main_op=_find_op(sess.graph, op_name=init_op_name),
        saver=model_saver,
    )

    v1_builder.save()

  # Function aliases are patched into the already-written SavedModel proto,
  # since the TF1 builder cannot save them itself.
  if function_aliases:
    _save_function_alias(output_dir, tags, function_aliases)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/quantization_options_pb2.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as tensorflow_dot_compiler_dot_mlir_dot_quantization_dot_stablehlo_dot_quantization__config__pb2
|
| 15 |
+
from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nKtensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto\x12\x17tensorflow.quantization\x1aItensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto\x1a&tensorflow/core/framework/tensor.proto\"\xed\x02\n\x12QuantizationMethod\x12O\n\rpreset_method\x18\x04 \x01(\x0e\x32\x38.tensorflow.quantization.QuantizationMethod.PresetMethod\x12X\n\x1cquantization_component_specs\x18\x03 \x03(\x0b\x32\x32.tensorflow.quantization.QuantizationComponentSpec\"\xa5\x01\n\x0cPresetMethod\x12\x16\n\x12METHOD_UNSPECIFIED\x10\x00\x12\x16\n\x12METHOD_NO_QUANTIZE\x10\x01\x12\x1c\n\x18METHOD_STATIC_RANGE_INT8\x10\x02\x12\x1d\n\x19METHOD_DYNAMIC_RANGE_INT8\x10\x03\x12(\n$METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8\x10\x04J\x04\x08\x01\x10\x03\"\xbe\x03\n\x19QuantizationComponentSpec\x12h\n\x16quantization_component\x18\x01 \x01(\x0e\x32H.tensorflow.quantization.QuantizationComponentSpec.QuantizationComponent\x12R\n\x0btensor_type\x18\x02 \x01(\x0e\x32=.tensorflow.quantization.QuantizationComponentSpec.TensorType\"v\n\x15QuantizationComponent\x12\x19\n\x15\x43OMPONENT_UNSPECIFIED\x10\x00\x12\x18\n\x14\x43OMPONENT_ACTIVATION\x10\x01\x12\x14\n\x10\x43OMPONENT_WEIGHT\x10\x02\x12\x12\n\x0e\x43OMPONENT_BIAS\x10\x03\"k\n\nTensorType\x12\x1a\n\x16TENSORTYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10TENSORTYPE_INT_4\x10\x01\x12\x14\n\x10TENSORTYPE_INT_8\x10\x02\x12\x15\n\x11TENSORTYPE_INT_32\x10\x03\"\x87\x02\n\x18UnitWiseQuantizationSpec\x12P\n\x04unit\x18\x05 \x03(\x0b\x32\x42.tensorflow.quantization.UnitWiseQuantizationSpec.QuantizationUnit\x12H\n\x13quantization_method\x18\x06 \x01(\x0b\x32+.tensorflow.quantization.QuantizationMethod\x1aI\n\x10QuantizationUnit\x12\x0f\n\x07op_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x11\n\tfunc_name\x18\x03 \x01(\tJ\x04\x08\x01\x10\x05\"\xd4\x01\n\x18RepresentativeDataSample\x12\x65\n\x13tensor_proto_inputs\x18\x02 
\x03(\x0b\x32H.tensorflow.quantization.RepresentativeDataSample.TensorProtoInputsEntry\x1aQ\n\x16TensorProtoInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.tensorflow.TensorProto:\x02\x38\x01\"I\n\x19RepresentativeDatasetFile\x12\x1c\n\x12tfrecord_file_path\x18\x01 \x01(\tH\x00\x42\x0e\n\x0c\x64\x61taset_file\"\xca\x07\n\x13QuantizationOptions\x12H\n\x13quantization_method\x18\x01 \x01(\x0b\x32+.tensorflow.quantization.QuantizationMethod\x12.\n\x06op_set\x18\x02 \x01(\x0e\x32\x1e.tensorflow.quantization.OpSet\x12W\n\x1cunit_wise_quantization_specs\x18\x11 \x03(\x0b\x32\x31.tensorflow.quantization.UnitWiseQuantizationSpec\x12\x0c\n\x04tags\x18\x05 \x03(\t\x12\x16\n\x0esignature_keys\x18\x06 \x03(\t\x12i\n\x17representative_datasets\x18\x07 \x03(\x0b\x32H.tensorflow.quantization.QuantizationOptions.RepresentativeDatasetsEntry\x12$\n\x1cmin_num_elements_for_weights\x18\x08 \x01(\x03\x12!\n\x14\x66reeze_all_variables\x18\t \x01(\x08H\x00\x88\x01\x01\x12,\n\x1f\x65nable_per_channel_quantization\x18\n \x01(\x08H\x01\x88\x01\x01\x12 \n\x18\x65nable_two_input_tensors\x18\x0b \x01(\x08\x12-\n%experimental_enable_tpu_model_support\x18\x0c \x01(\x08\x12!\n\x19\x65nable_legacy_weight_only\x18\r \x01(\x08\x12$\n\x1c\x66orce_graph_mode_calibration\x18\x0e \x01(\x08\x12G\n\x13\x63\x61libration_options\x18\x0f \x01(\x0b\x32*.stablehlo.quantization.CalibrationOptions\x12?\n\x0f\x64\x65\x62ugger_config\x18\x10 \x01(\x0b\x32&.stablehlo.quantization.DebuggerConfig\x1aq\n\x1bRepresentativeDatasetsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x41\n\x05value\x18\x02 \x01(\x0b\x32\x32.tensorflow.quantization.RepresentativeDatasetFile:\x02\x38\x01\x42\x17\n\x15_freeze_all_variablesB\"\n _enable_per_channel_quantizationJ\x04\x08\x03\x10\x04*V\n\x05OpSet\x12\x16\n\x12OP_SET_UNSPECIFIED\x10\x00\x12\x06\n\x02TF\x10\x01\x12\x07\n\x03XLA\x10\x02\x12\x15\n\x11UNIFORM_QUANTIZED\x10\x03\x12\r\n\tSTABLEHLO\x10\x04\x42\x03\xf8\x01\x01\x62\x06proto3')
|
| 19 |
+
|
| 20 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 21 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.quantization_options_pb2', globals())
|
| 22 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 23 |
+
|
| 24 |
+
DESCRIPTOR._options = None
|
| 25 |
+
DESCRIPTOR._serialized_options = b'\370\001\001'
|
| 26 |
+
_REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._options = None
|
| 27 |
+
_REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_options = b'8\001'
|
| 28 |
+
_QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._options = None
|
| 29 |
+
_QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_options = b'8\001'
|
| 30 |
+
_OPSET._serialized_start=2565
|
| 31 |
+
_OPSET._serialized_end=2651
|
| 32 |
+
_QUANTIZATIONMETHOD._serialized_start=220
|
| 33 |
+
_QUANTIZATIONMETHOD._serialized_end=585
|
| 34 |
+
_QUANTIZATIONMETHOD_PRESETMETHOD._serialized_start=414
|
| 35 |
+
_QUANTIZATIONMETHOD_PRESETMETHOD._serialized_end=579
|
| 36 |
+
_QUANTIZATIONCOMPONENTSPEC._serialized_start=588
|
| 37 |
+
_QUANTIZATIONCOMPONENTSPEC._serialized_end=1034
|
| 38 |
+
_QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_start=807
|
| 39 |
+
_QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_end=925
|
| 40 |
+
_QUANTIZATIONCOMPONENTSPEC_TENSORTYPE._serialized_start=927
|
| 41 |
+
_QUANTIZATIONCOMPONENTSPEC_TENSORTYPE._serialized_end=1034
|
| 42 |
+
_UNITWISEQUANTIZATIONSPEC._serialized_start=1037
|
| 43 |
+
_UNITWISEQUANTIZATIONSPEC._serialized_end=1300
|
| 44 |
+
_UNITWISEQUANTIZATIONSPEC_QUANTIZATIONUNIT._serialized_start=1221
|
| 45 |
+
_UNITWISEQUANTIZATIONSPEC_QUANTIZATIONUNIT._serialized_end=1294
|
| 46 |
+
_REPRESENTATIVEDATASAMPLE._serialized_start=1303
|
| 47 |
+
_REPRESENTATIVEDATASAMPLE._serialized_end=1515
|
| 48 |
+
_REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_start=1434
|
| 49 |
+
_REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_end=1515
|
| 50 |
+
_REPRESENTATIVEDATASETFILE._serialized_start=1517
|
| 51 |
+
_REPRESENTATIVEDATASETFILE._serialized_end=1590
|
| 52 |
+
_QUANTIZATIONOPTIONS._serialized_start=1593
|
| 53 |
+
_QUANTIZATIONOPTIONS._serialized_end=2563
|
| 54 |
+
_QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_start=2383
|
| 55 |
+
_QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_end=2496
|
| 56 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/compiler/mlir/stablehlo/__init__.py
ADDED
|
File without changes
|