Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +5 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/gen_audio_microfrontend_op.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py +423 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/audio_microfrontend_op.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so +3 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py +110 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/model_runtime_info_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/profiling_info_pb2.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/model_runtime_info_pb2.py +55 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/profiling_info_pb2.py +36 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/analyzer.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/conversion_metadata_schema_py_generated.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_phase.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_saved_model.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/interpreter.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite_constants.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/op_hint.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_py_generated.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_util.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_convert.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_keras_util.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/util.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.pyi +16 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so +3 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__init__.py +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/__init__.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/authoring.cpython-310.pyc +0 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/authoring.py +301 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert.py +1250 -0
- SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_saved_model.py +186 -0
.gitattributes
CHANGED
|
@@ -198,3 +198,8 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/gr
|
|
| 198 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text
|
| 199 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text
|
| 200 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text
|
| 199 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text
|
| 200 |
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text
|
| 201 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 202 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 203 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 204 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so filter=lfs diff=lfs merge=lfs -text
|
| 205 |
+
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so filter=lfs diff=lfs merge=lfs -text
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (204 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (218 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (222 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/gen_audio_microfrontend_op.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py
ADDED
|
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Python wrappers around TensorFlow ops.
|
| 2 |
+
|
| 3 |
+
This file is MACHINE GENERATED! Do not edit.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import collections
|
| 7 |
+
|
| 8 |
+
from tensorflow.python import pywrap_tfe as pywrap_tfe
|
| 9 |
+
from tensorflow.python.eager import context as _context
|
| 10 |
+
from tensorflow.python.eager import core as _core
|
| 11 |
+
from tensorflow.python.eager import execute as _execute
|
| 12 |
+
from tensorflow.python.framework import dtypes as _dtypes
|
| 13 |
+
from tensorflow.security.fuzzing.py import annotation_types as _atypes
|
| 14 |
+
|
| 15 |
+
from tensorflow.python.framework import op_def_registry as _op_def_registry
|
| 16 |
+
from tensorflow.python.framework import ops as _ops
|
| 17 |
+
from tensorflow.python.framework import op_def_library as _op_def_library
|
| 18 |
+
from tensorflow.python.util.deprecation import deprecated_endpoints
|
| 19 |
+
from tensorflow.python.util import dispatch as _dispatch
|
| 20 |
+
from tensorflow.python.util.tf_export import tf_export
|
| 21 |
+
|
| 22 |
+
from typing import TypeVar, List, Any
|
| 23 |
+
from typing_extensions import Annotated
|
| 24 |
+
|
| 25 |
+
TV_AudioMicrofrontend_out_type = TypeVar("TV_AudioMicrofrontend_out_type", _atypes.Float32, _atypes.UInt16)
|
| 26 |
+
|
| 27 |
+
@_dispatch.add_fallback_dispatch_list
|
| 28 |
+
@_dispatch.add_type_based_api_dispatcher
|
| 29 |
+
@tf_export('audio_microfrontend')
|
| 30 |
+
def audio_microfrontend(audio: Annotated[Any, _atypes.Int16], sample_rate:int=16000, window_size:int=25, window_step:int=10, num_channels:int=32, upper_band_limit:float=7500, lower_band_limit:float=125, smoothing_bits:int=10, even_smoothing:float=0.025, odd_smoothing:float=0.06, min_signal_remaining:float=0.05, enable_pcan:bool=False, pcan_strength:float=0.95, pcan_offset:float=80, gain_bits:int=21, enable_log:bool=True, scale_shift:int=6, left_context:int=0, right_context:int=0, frame_stride:int=1, zero_padding:bool=False, out_scale:int=1, out_type:TV_AudioMicrofrontend_out_type=_dtypes.uint16, name=None) -> Annotated[Any, TV_AudioMicrofrontend_out_type]:
|
| 31 |
+
r"""Audio Microfrontend Op.
|
| 32 |
+
|
| 33 |
+
This Op converts a sequence of audio data into one or more
|
| 34 |
+
feature vectors containing filterbanks of the input. The
|
| 35 |
+
conversion process uses a lightweight library to perform:
|
| 36 |
+
|
| 37 |
+
1. A slicing window function
|
| 38 |
+
2. Short-time FFTs
|
| 39 |
+
3. Filterbank calculations
|
| 40 |
+
4. Noise reduction
|
| 41 |
+
5. PCAN Auto Gain Control
|
| 42 |
+
6. Logarithmic scaling
|
| 43 |
+
|
| 44 |
+
Arguments
|
| 45 |
+
audio: 1D Tensor, int16 audio data in temporal ordering.
|
| 46 |
+
sample_rate: Integer, the sample rate of the audio in Hz.
|
| 47 |
+
window_size: Integer, length of desired time frames in ms.
|
| 48 |
+
window_step: Integer, length of step size for the next frame in ms.
|
| 49 |
+
num_channels: Integer, the number of filterbank channels to use.
|
| 50 |
+
upper_band_limit: Float, the highest frequency included in the filterbanks.
|
| 51 |
+
lower_band_limit: Float, the lowest frequency included in the filterbanks.
|
| 52 |
+
smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
|
| 53 |
+
even_smoothing: Float, smoothing coefficient for even-numbered channels.
|
| 54 |
+
odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
|
| 55 |
+
min_signal_remaining: Float, fraction of signal to preserve in smoothing.
|
| 56 |
+
enable_pcan: Bool, enable PCAN auto gain control.
|
| 57 |
+
pcan_strength: Float, gain normalization exponent.
|
| 58 |
+
pcan_offset: Float, positive value added in the normalization denominator.
|
| 59 |
+
gain_bits: Int, number of fractional bits in the gain.
|
| 60 |
+
enable_log: Bool, enable logarithmic scaling of filterbanks.
|
| 61 |
+
scale_shift: Integer, scale filterbanks by 2^(scale_shift).
|
| 62 |
+
left_context: Integer, number of preceding frames to attach to each frame.
|
| 63 |
+
right_context: Integer, number of preceding frames to attach to each frame.
|
| 64 |
+
frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
|
| 65 |
+
zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
|
| 66 |
+
zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
|
| 67 |
+
out_scale: Integer, divide all filterbanks by this number.
|
| 68 |
+
out_type: DType, type of the output Tensor, defaults to UINT16.
|
| 69 |
+
|
| 70 |
+
Returns
|
| 71 |
+
filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
|
| 72 |
+
|
| 73 |
+
Args:
|
| 74 |
+
audio: A `Tensor` of type `int16`.
|
| 75 |
+
sample_rate: An optional `int`. Defaults to `16000`.
|
| 76 |
+
window_size: An optional `int`. Defaults to `25`.
|
| 77 |
+
window_step: An optional `int`. Defaults to `10`.
|
| 78 |
+
num_channels: An optional `int`. Defaults to `32`.
|
| 79 |
+
upper_band_limit: An optional `float`. Defaults to `7500`.
|
| 80 |
+
lower_band_limit: An optional `float`. Defaults to `125`.
|
| 81 |
+
smoothing_bits: An optional `int`. Defaults to `10`.
|
| 82 |
+
even_smoothing: An optional `float`. Defaults to `0.025`.
|
| 83 |
+
odd_smoothing: An optional `float`. Defaults to `0.06`.
|
| 84 |
+
min_signal_remaining: An optional `float`. Defaults to `0.05`.
|
| 85 |
+
enable_pcan: An optional `bool`. Defaults to `False`.
|
| 86 |
+
pcan_strength: An optional `float`. Defaults to `0.95`.
|
| 87 |
+
pcan_offset: An optional `float`. Defaults to `80`.
|
| 88 |
+
gain_bits: An optional `int`. Defaults to `21`.
|
| 89 |
+
enable_log: An optional `bool`. Defaults to `True`.
|
| 90 |
+
scale_shift: An optional `int`. Defaults to `6`.
|
| 91 |
+
left_context: An optional `int`. Defaults to `0`.
|
| 92 |
+
right_context: An optional `int`. Defaults to `0`.
|
| 93 |
+
frame_stride: An optional `int`. Defaults to `1`.
|
| 94 |
+
zero_padding: An optional `bool`. Defaults to `False`.
|
| 95 |
+
out_scale: An optional `int`. Defaults to `1`.
|
| 96 |
+
out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`.
|
| 97 |
+
name: A name for the operation (optional).
|
| 98 |
+
|
| 99 |
+
Returns:
|
| 100 |
+
A `Tensor` of type `out_type`.
|
| 101 |
+
"""
|
| 102 |
+
_ctx = _context._context or _context.context()
|
| 103 |
+
tld = _ctx._thread_local_data
|
| 104 |
+
if tld.is_eager:
|
| 105 |
+
try:
|
| 106 |
+
_result = pywrap_tfe.TFE_Py_FastPathExecute(
|
| 107 |
+
_ctx, "AudioMicrofrontend", name, audio, "sample_rate", sample_rate,
|
| 108 |
+
"window_size", window_size, "window_step", window_step,
|
| 109 |
+
"num_channels", num_channels, "upper_band_limit", upper_band_limit,
|
| 110 |
+
"lower_band_limit", lower_band_limit, "smoothing_bits",
|
| 111 |
+
smoothing_bits, "even_smoothing", even_smoothing, "odd_smoothing",
|
| 112 |
+
odd_smoothing, "min_signal_remaining", min_signal_remaining,
|
| 113 |
+
"enable_pcan", enable_pcan, "pcan_strength", pcan_strength,
|
| 114 |
+
"pcan_offset", pcan_offset, "gain_bits", gain_bits, "enable_log",
|
| 115 |
+
enable_log, "scale_shift", scale_shift, "left_context", left_context,
|
| 116 |
+
"right_context", right_context, "frame_stride", frame_stride,
|
| 117 |
+
"zero_padding", zero_padding, "out_scale", out_scale, "out_type",
|
| 118 |
+
out_type)
|
| 119 |
+
return _result
|
| 120 |
+
except _core._NotOkStatusException as e:
|
| 121 |
+
_ops.raise_from_not_ok_status(e, name)
|
| 122 |
+
except _core._FallbackException:
|
| 123 |
+
pass
|
| 124 |
+
try:
|
| 125 |
+
_result = _dispatcher_for_audio_microfrontend(
|
| 126 |
+
(audio, sample_rate, window_size, window_step, num_channels,
|
| 127 |
+
upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
|
| 128 |
+
odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
|
| 129 |
+
pcan_offset, gain_bits, enable_log, scale_shift, left_context,
|
| 130 |
+
right_context, frame_stride, zero_padding, out_scale, out_type,
|
| 131 |
+
name,), None)
|
| 132 |
+
if _result is not NotImplemented:
|
| 133 |
+
return _result
|
| 134 |
+
return audio_microfrontend_eager_fallback(
|
| 135 |
+
audio, sample_rate=sample_rate, window_size=window_size,
|
| 136 |
+
window_step=window_step, num_channels=num_channels,
|
| 137 |
+
upper_band_limit=upper_band_limit,
|
| 138 |
+
lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits,
|
| 139 |
+
even_smoothing=even_smoothing, odd_smoothing=odd_smoothing,
|
| 140 |
+
min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan,
|
| 141 |
+
pcan_strength=pcan_strength, pcan_offset=pcan_offset,
|
| 142 |
+
gain_bits=gain_bits, enable_log=enable_log, scale_shift=scale_shift,
|
| 143 |
+
left_context=left_context, right_context=right_context,
|
| 144 |
+
frame_stride=frame_stride, zero_padding=zero_padding,
|
| 145 |
+
out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx)
|
| 146 |
+
except _core._SymbolicException:
|
| 147 |
+
pass # Add nodes to the TensorFlow graph.
|
| 148 |
+
except (TypeError, ValueError):
|
| 149 |
+
_result = _dispatch.dispatch(
|
| 150 |
+
audio_microfrontend, (), dict(audio=audio,
|
| 151 |
+
sample_rate=sample_rate,
|
| 152 |
+
window_size=window_size,
|
| 153 |
+
window_step=window_step,
|
| 154 |
+
num_channels=num_channels,
|
| 155 |
+
upper_band_limit=upper_band_limit,
|
| 156 |
+
lower_band_limit=lower_band_limit,
|
| 157 |
+
smoothing_bits=smoothing_bits,
|
| 158 |
+
even_smoothing=even_smoothing,
|
| 159 |
+
odd_smoothing=odd_smoothing,
|
| 160 |
+
min_signal_remaining=min_signal_remaining,
|
| 161 |
+
enable_pcan=enable_pcan,
|
| 162 |
+
pcan_strength=pcan_strength,
|
| 163 |
+
pcan_offset=pcan_offset,
|
| 164 |
+
gain_bits=gain_bits,
|
| 165 |
+
enable_log=enable_log,
|
| 166 |
+
scale_shift=scale_shift,
|
| 167 |
+
left_context=left_context,
|
| 168 |
+
right_context=right_context,
|
| 169 |
+
frame_stride=frame_stride,
|
| 170 |
+
zero_padding=zero_padding,
|
| 171 |
+
out_scale=out_scale,
|
| 172 |
+
out_type=out_type, name=name)
|
| 173 |
+
)
|
| 174 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 175 |
+
return _result
|
| 176 |
+
raise
|
| 177 |
+
else:
|
| 178 |
+
_result = _dispatcher_for_audio_microfrontend(
|
| 179 |
+
(audio, sample_rate, window_size, window_step, num_channels,
|
| 180 |
+
upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
|
| 181 |
+
odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
|
| 182 |
+
pcan_offset, gain_bits, enable_log, scale_shift, left_context,
|
| 183 |
+
right_context, frame_stride, zero_padding, out_scale, out_type,
|
| 184 |
+
name,), None)
|
| 185 |
+
if _result is not NotImplemented:
|
| 186 |
+
return _result
|
| 187 |
+
# Add nodes to the TensorFlow graph.
|
| 188 |
+
if sample_rate is None:
|
| 189 |
+
sample_rate = 16000
|
| 190 |
+
sample_rate = _execute.make_int(sample_rate, "sample_rate")
|
| 191 |
+
if window_size is None:
|
| 192 |
+
window_size = 25
|
| 193 |
+
window_size = _execute.make_int(window_size, "window_size")
|
| 194 |
+
if window_step is None:
|
| 195 |
+
window_step = 10
|
| 196 |
+
window_step = _execute.make_int(window_step, "window_step")
|
| 197 |
+
if num_channels is None:
|
| 198 |
+
num_channels = 32
|
| 199 |
+
num_channels = _execute.make_int(num_channels, "num_channels")
|
| 200 |
+
if upper_band_limit is None:
|
| 201 |
+
upper_band_limit = 7500
|
| 202 |
+
upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
|
| 203 |
+
if lower_band_limit is None:
|
| 204 |
+
lower_band_limit = 125
|
| 205 |
+
lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
|
| 206 |
+
if smoothing_bits is None:
|
| 207 |
+
smoothing_bits = 10
|
| 208 |
+
smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
|
| 209 |
+
if even_smoothing is None:
|
| 210 |
+
even_smoothing = 0.025
|
| 211 |
+
even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
|
| 212 |
+
if odd_smoothing is None:
|
| 213 |
+
odd_smoothing = 0.06
|
| 214 |
+
odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
|
| 215 |
+
if min_signal_remaining is None:
|
| 216 |
+
min_signal_remaining = 0.05
|
| 217 |
+
min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
|
| 218 |
+
if enable_pcan is None:
|
| 219 |
+
enable_pcan = False
|
| 220 |
+
enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
|
| 221 |
+
if pcan_strength is None:
|
| 222 |
+
pcan_strength = 0.95
|
| 223 |
+
pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
|
| 224 |
+
if pcan_offset is None:
|
| 225 |
+
pcan_offset = 80
|
| 226 |
+
pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
|
| 227 |
+
if gain_bits is None:
|
| 228 |
+
gain_bits = 21
|
| 229 |
+
gain_bits = _execute.make_int(gain_bits, "gain_bits")
|
| 230 |
+
if enable_log is None:
|
| 231 |
+
enable_log = True
|
| 232 |
+
enable_log = _execute.make_bool(enable_log, "enable_log")
|
| 233 |
+
if scale_shift is None:
|
| 234 |
+
scale_shift = 6
|
| 235 |
+
scale_shift = _execute.make_int(scale_shift, "scale_shift")
|
| 236 |
+
if left_context is None:
|
| 237 |
+
left_context = 0
|
| 238 |
+
left_context = _execute.make_int(left_context, "left_context")
|
| 239 |
+
if right_context is None:
|
| 240 |
+
right_context = 0
|
| 241 |
+
right_context = _execute.make_int(right_context, "right_context")
|
| 242 |
+
if frame_stride is None:
|
| 243 |
+
frame_stride = 1
|
| 244 |
+
frame_stride = _execute.make_int(frame_stride, "frame_stride")
|
| 245 |
+
if zero_padding is None:
|
| 246 |
+
zero_padding = False
|
| 247 |
+
zero_padding = _execute.make_bool(zero_padding, "zero_padding")
|
| 248 |
+
if out_scale is None:
|
| 249 |
+
out_scale = 1
|
| 250 |
+
out_scale = _execute.make_int(out_scale, "out_scale")
|
| 251 |
+
if out_type is None:
|
| 252 |
+
out_type = _dtypes.uint16
|
| 253 |
+
out_type = _execute.make_type(out_type, "out_type")
|
| 254 |
+
try:
|
| 255 |
+
_, _, _op, _outputs = _op_def_library._apply_op_helper(
|
| 256 |
+
"AudioMicrofrontend", audio=audio, sample_rate=sample_rate,
|
| 257 |
+
window_size=window_size,
|
| 258 |
+
window_step=window_step,
|
| 259 |
+
num_channels=num_channels,
|
| 260 |
+
upper_band_limit=upper_band_limit,
|
| 261 |
+
lower_band_limit=lower_band_limit,
|
| 262 |
+
smoothing_bits=smoothing_bits,
|
| 263 |
+
even_smoothing=even_smoothing,
|
| 264 |
+
odd_smoothing=odd_smoothing,
|
| 265 |
+
min_signal_remaining=min_signal_remaining,
|
| 266 |
+
enable_pcan=enable_pcan,
|
| 267 |
+
pcan_strength=pcan_strength,
|
| 268 |
+
pcan_offset=pcan_offset, gain_bits=gain_bits,
|
| 269 |
+
enable_log=enable_log, scale_shift=scale_shift,
|
| 270 |
+
left_context=left_context,
|
| 271 |
+
right_context=right_context,
|
| 272 |
+
frame_stride=frame_stride,
|
| 273 |
+
zero_padding=zero_padding, out_scale=out_scale,
|
| 274 |
+
out_type=out_type, name=name)
|
| 275 |
+
except (TypeError, ValueError):
|
| 276 |
+
_result = _dispatch.dispatch(
|
| 277 |
+
audio_microfrontend, (), dict(audio=audio, sample_rate=sample_rate,
|
| 278 |
+
window_size=window_size,
|
| 279 |
+
window_step=window_step,
|
| 280 |
+
num_channels=num_channels,
|
| 281 |
+
upper_band_limit=upper_band_limit,
|
| 282 |
+
lower_band_limit=lower_band_limit,
|
| 283 |
+
smoothing_bits=smoothing_bits,
|
| 284 |
+
even_smoothing=even_smoothing,
|
| 285 |
+
odd_smoothing=odd_smoothing,
|
| 286 |
+
min_signal_remaining=min_signal_remaining,
|
| 287 |
+
enable_pcan=enable_pcan,
|
| 288 |
+
pcan_strength=pcan_strength,
|
| 289 |
+
pcan_offset=pcan_offset,
|
| 290 |
+
gain_bits=gain_bits,
|
| 291 |
+
enable_log=enable_log,
|
| 292 |
+
scale_shift=scale_shift,
|
| 293 |
+
left_context=left_context,
|
| 294 |
+
right_context=right_context,
|
| 295 |
+
frame_stride=frame_stride,
|
| 296 |
+
zero_padding=zero_padding,
|
| 297 |
+
out_scale=out_scale,
|
| 298 |
+
out_type=out_type, name=name)
|
| 299 |
+
)
|
| 300 |
+
if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
|
| 301 |
+
return _result
|
| 302 |
+
raise
|
| 303 |
+
_result = _outputs[:]
|
| 304 |
+
if _execute.must_record_gradient():
|
| 305 |
+
_attrs = ("sample_rate", _op._get_attr_int("sample_rate"), "window_size",
|
| 306 |
+
_op._get_attr_int("window_size"), "window_step",
|
| 307 |
+
_op._get_attr_int("window_step"), "num_channels",
|
| 308 |
+
_op._get_attr_int("num_channels"), "upper_band_limit",
|
| 309 |
+
_op.get_attr("upper_band_limit"), "lower_band_limit",
|
| 310 |
+
_op.get_attr("lower_band_limit"), "smoothing_bits",
|
| 311 |
+
_op._get_attr_int("smoothing_bits"), "even_smoothing",
|
| 312 |
+
_op.get_attr("even_smoothing"), "odd_smoothing",
|
| 313 |
+
_op.get_attr("odd_smoothing"), "min_signal_remaining",
|
| 314 |
+
_op.get_attr("min_signal_remaining"), "enable_pcan",
|
| 315 |
+
_op._get_attr_bool("enable_pcan"), "pcan_strength",
|
| 316 |
+
_op.get_attr("pcan_strength"), "pcan_offset",
|
| 317 |
+
_op.get_attr("pcan_offset"), "gain_bits",
|
| 318 |
+
_op._get_attr_int("gain_bits"), "enable_log",
|
| 319 |
+
_op._get_attr_bool("enable_log"), "scale_shift",
|
| 320 |
+
_op._get_attr_int("scale_shift"), "left_context",
|
| 321 |
+
_op._get_attr_int("left_context"), "right_context",
|
| 322 |
+
_op._get_attr_int("right_context"), "frame_stride",
|
| 323 |
+
_op._get_attr_int("frame_stride"), "zero_padding",
|
| 324 |
+
_op._get_attr_bool("zero_padding"), "out_scale",
|
| 325 |
+
_op._get_attr_int("out_scale"), "out_type",
|
| 326 |
+
_op._get_attr_type("out_type"))
|
| 327 |
+
_inputs_flat = _op.inputs
|
| 328 |
+
_execute.record_gradient(
|
| 329 |
+
"AudioMicrofrontend", _inputs_flat, _attrs, _result)
|
| 330 |
+
_result, = _result
|
| 331 |
+
return _result
|
| 332 |
+
|
| 333 |
+
AudioMicrofrontend = tf_export("raw_ops.AudioMicrofrontend")(_ops.to_raw_op(audio_microfrontend))
|
| 334 |
+
_dispatcher_for_audio_microfrontend = audio_microfrontend._tf_type_based_dispatcher.Dispatch
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def audio_microfrontend_eager_fallback(audio: Annotated[Any, _atypes.Int16], sample_rate: int, window_size: int, window_step: int, num_channels: int, upper_band_limit: float, lower_band_limit: float, smoothing_bits: int, even_smoothing: float, odd_smoothing: float, min_signal_remaining: float, enable_pcan: bool, pcan_strength: float, pcan_offset: float, gain_bits: int, enable_log: bool, scale_shift: int, left_context: int, right_context: int, frame_stride: int, zero_padding: bool, out_scale: int, out_type: TV_AudioMicrofrontend_out_type, name, ctx) -> Annotated[Any, TV_AudioMicrofrontend_out_type]:
|
| 338 |
+
if sample_rate is None:
|
| 339 |
+
sample_rate = 16000
|
| 340 |
+
sample_rate = _execute.make_int(sample_rate, "sample_rate")
|
| 341 |
+
if window_size is None:
|
| 342 |
+
window_size = 25
|
| 343 |
+
window_size = _execute.make_int(window_size, "window_size")
|
| 344 |
+
if window_step is None:
|
| 345 |
+
window_step = 10
|
| 346 |
+
window_step = _execute.make_int(window_step, "window_step")
|
| 347 |
+
if num_channels is None:
|
| 348 |
+
num_channels = 32
|
| 349 |
+
num_channels = _execute.make_int(num_channels, "num_channels")
|
| 350 |
+
if upper_band_limit is None:
|
| 351 |
+
upper_band_limit = 7500
|
| 352 |
+
upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit")
|
| 353 |
+
if lower_band_limit is None:
|
| 354 |
+
lower_band_limit = 125
|
| 355 |
+
lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit")
|
| 356 |
+
if smoothing_bits is None:
|
| 357 |
+
smoothing_bits = 10
|
| 358 |
+
smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits")
|
| 359 |
+
if even_smoothing is None:
|
| 360 |
+
even_smoothing = 0.025
|
| 361 |
+
even_smoothing = _execute.make_float(even_smoothing, "even_smoothing")
|
| 362 |
+
if odd_smoothing is None:
|
| 363 |
+
odd_smoothing = 0.06
|
| 364 |
+
odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing")
|
| 365 |
+
if min_signal_remaining is None:
|
| 366 |
+
min_signal_remaining = 0.05
|
| 367 |
+
min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining")
|
| 368 |
+
if enable_pcan is None:
|
| 369 |
+
enable_pcan = False
|
| 370 |
+
enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan")
|
| 371 |
+
if pcan_strength is None:
|
| 372 |
+
pcan_strength = 0.95
|
| 373 |
+
pcan_strength = _execute.make_float(pcan_strength, "pcan_strength")
|
| 374 |
+
if pcan_offset is None:
|
| 375 |
+
pcan_offset = 80
|
| 376 |
+
pcan_offset = _execute.make_float(pcan_offset, "pcan_offset")
|
| 377 |
+
if gain_bits is None:
|
| 378 |
+
gain_bits = 21
|
| 379 |
+
gain_bits = _execute.make_int(gain_bits, "gain_bits")
|
| 380 |
+
if enable_log is None:
|
| 381 |
+
enable_log = True
|
| 382 |
+
enable_log = _execute.make_bool(enable_log, "enable_log")
|
| 383 |
+
if scale_shift is None:
|
| 384 |
+
scale_shift = 6
|
| 385 |
+
scale_shift = _execute.make_int(scale_shift, "scale_shift")
|
| 386 |
+
if left_context is None:
|
| 387 |
+
left_context = 0
|
| 388 |
+
left_context = _execute.make_int(left_context, "left_context")
|
| 389 |
+
if right_context is None:
|
| 390 |
+
right_context = 0
|
| 391 |
+
right_context = _execute.make_int(right_context, "right_context")
|
| 392 |
+
if frame_stride is None:
|
| 393 |
+
frame_stride = 1
|
| 394 |
+
frame_stride = _execute.make_int(frame_stride, "frame_stride")
|
| 395 |
+
if zero_padding is None:
|
| 396 |
+
zero_padding = False
|
| 397 |
+
zero_padding = _execute.make_bool(zero_padding, "zero_padding")
|
| 398 |
+
if out_scale is None:
|
| 399 |
+
out_scale = 1
|
| 400 |
+
out_scale = _execute.make_int(out_scale, "out_scale")
|
| 401 |
+
if out_type is None:
|
| 402 |
+
out_type = _dtypes.uint16
|
| 403 |
+
out_type = _execute.make_type(out_type, "out_type")
|
| 404 |
+
audio = _ops.convert_to_tensor(audio, _dtypes.int16)
|
| 405 |
+
_inputs_flat = [audio]
|
| 406 |
+
_attrs = ("sample_rate", sample_rate, "window_size", window_size,
|
| 407 |
+
"window_step", window_step, "num_channels", num_channels,
|
| 408 |
+
"upper_band_limit", upper_band_limit, "lower_band_limit", lower_band_limit,
|
| 409 |
+
"smoothing_bits", smoothing_bits, "even_smoothing", even_smoothing,
|
| 410 |
+
"odd_smoothing", odd_smoothing, "min_signal_remaining",
|
| 411 |
+
min_signal_remaining, "enable_pcan", enable_pcan, "pcan_strength",
|
| 412 |
+
pcan_strength, "pcan_offset", pcan_offset, "gain_bits", gain_bits,
|
| 413 |
+
"enable_log", enable_log, "scale_shift", scale_shift, "left_context",
|
| 414 |
+
left_context, "right_context", right_context, "frame_stride", frame_stride,
|
| 415 |
+
"zero_padding", zero_padding, "out_scale", out_scale, "out_type", out_type)
|
| 416 |
+
_result = _execute.execute(b"AudioMicrofrontend", 1, inputs=_inputs_flat,
|
| 417 |
+
attrs=_attrs, ctx=ctx, name=name)
|
| 418 |
+
if _execute.must_record_gradient():
|
| 419 |
+
_execute.record_gradient(
|
| 420 |
+
"AudioMicrofrontend", _inputs_flat, _attrs, _result)
|
| 421 |
+
_result, = _result
|
| 422 |
+
return _result
|
| 423 |
+
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (225 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (229 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/audio_microfrontend_op.cpython-310.pyc
ADDED
|
Binary file (3.94 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c87c0f07860502c2f646feeeee3524feb706e1b430d5ddf0314133a260531af
|
| 3 |
+
size 1240400
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""AudioMicrofrontend Op creates filterbanks from audio data."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op
|
| 18 |
+
from tensorflow.python.framework import dtypes
|
| 19 |
+
from tensorflow.python.framework import load_library
|
| 20 |
+
from tensorflow.python.framework import ops
|
| 21 |
+
from tensorflow.python.ops import array_ops
|
| 22 |
+
from tensorflow.python.platform import resource_loader
|
| 23 |
+
|
| 24 |
+
_audio_microfrontend_op = load_library.load_op_library(
|
| 25 |
+
resource_loader.get_path_to_datafile("_audio_microfrontend_op.so"))
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def audio_microfrontend(audio,
|
| 29 |
+
sample_rate=16000,
|
| 30 |
+
window_size=25,
|
| 31 |
+
window_step=10,
|
| 32 |
+
num_channels=32,
|
| 33 |
+
upper_band_limit=7500.0,
|
| 34 |
+
lower_band_limit=125.0,
|
| 35 |
+
smoothing_bits=10,
|
| 36 |
+
even_smoothing=0.025,
|
| 37 |
+
odd_smoothing=0.06,
|
| 38 |
+
min_signal_remaining=0.05,
|
| 39 |
+
enable_pcan=True,
|
| 40 |
+
pcan_strength=0.95,
|
| 41 |
+
pcan_offset=80.0,
|
| 42 |
+
gain_bits=21,
|
| 43 |
+
enable_log=True,
|
| 44 |
+
scale_shift=6,
|
| 45 |
+
left_context=0,
|
| 46 |
+
right_context=0,
|
| 47 |
+
frame_stride=1,
|
| 48 |
+
zero_padding=False,
|
| 49 |
+
out_scale=1,
|
| 50 |
+
out_type=dtypes.uint16):
|
| 51 |
+
"""Audio Microfrontend Op.
|
| 52 |
+
|
| 53 |
+
This Op converts a sequence of audio data into one or more
|
| 54 |
+
feature vectors containing filterbanks of the input. The
|
| 55 |
+
conversion process uses a lightweight library to perform:
|
| 56 |
+
|
| 57 |
+
1. A slicing window function
|
| 58 |
+
2. Short-time FFTs
|
| 59 |
+
3. Filterbank calculations
|
| 60 |
+
4. Noise reduction
|
| 61 |
+
5. PCAN Auto Gain Control
|
| 62 |
+
6. Logarithmic scaling
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
audio: 1D Tensor, int16 audio data in temporal ordering.
|
| 66 |
+
sample_rate: Integer, the sample rate of the audio in Hz.
|
| 67 |
+
window_size: Integer, length of desired time frames in ms.
|
| 68 |
+
window_step: Integer, length of step size for the next frame in ms.
|
| 69 |
+
num_channels: Integer, the number of filterbank channels to use.
|
| 70 |
+
upper_band_limit: Float, the highest frequency included in the filterbanks.
|
| 71 |
+
lower_band_limit: Float, the lowest frequency included in the filterbanks.
|
| 72 |
+
smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
|
| 73 |
+
even_smoothing: Float, smoothing coefficient for even-numbered channels.
|
| 74 |
+
odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
|
| 75 |
+
min_signal_remaining: Float, fraction of signal to preserve in smoothing.
|
| 76 |
+
enable_pcan: Bool, enable PCAN auto gain control.
|
| 77 |
+
pcan_strength: Float, gain normalization exponent.
|
| 78 |
+
pcan_offset: Float, positive value added in the normalization denominator.
|
| 79 |
+
gain_bits: Int, number of fractional bits in the gain.
|
| 80 |
+
enable_log: Bool, enable logarithmic scaling of filterbanks.
|
| 81 |
+
scale_shift: Integer, scale filterbanks by 2^(scale_shift).
|
| 82 |
+
left_context: Integer, number of preceding frames to attach to each frame.
|
| 83 |
+
right_context: Integer, number of preceding frames to attach to each frame.
|
| 84 |
+
frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
|
| 85 |
+
zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
|
| 86 |
+
zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
|
| 87 |
+
out_scale: Integer, divide all filterbanks by this number.
|
| 88 |
+
out_type: DType, type of the output Tensor, defaults to UINT16.
|
| 89 |
+
|
| 90 |
+
Returns:
|
| 91 |
+
filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
|
| 92 |
+
|
| 93 |
+
Raises:
|
| 94 |
+
ValueError: If the audio tensor is not explicitly a vector.
|
| 95 |
+
"""
|
| 96 |
+
audio_shape = audio.shape
|
| 97 |
+
if audio_shape.ndims is None:
|
| 98 |
+
raise ValueError("Input to `AudioMicrofrontend` should have known rank.")
|
| 99 |
+
if len(audio_shape) > 1:
|
| 100 |
+
audio = array_ops.reshape(audio, [-1])
|
| 101 |
+
|
| 102 |
+
return gen_audio_microfrontend_op.audio_microfrontend(
|
| 103 |
+
audio, sample_rate, window_size, window_step, num_channels,
|
| 104 |
+
upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
|
| 105 |
+
odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
|
| 106 |
+
pcan_offset, gain_bits, enable_log, scale_shift, left_context,
|
| 107 |
+
right_context, frame_stride, zero_padding, out_scale, out_type)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
ops.NotDifferentiable("AudioMicrofrontend")
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (201 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (207 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/model_runtime_info_pb2.cpython-310.pyc
ADDED
|
Binary file (3.25 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/profiling_info_pb2.cpython-310.pyc
ADDED
|
Binary file (2.16 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/model_runtime_info_pb2.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/lite/profiling/proto/model_runtime_info.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
from tensorflow.lite.profiling.proto import profiling_info_pb2 as tensorflow_dot_lite_dot_profiling_dot_proto_dot_profiling__info__pb2
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n8tensorflow/lite/profiling/proto/model_runtime_info.proto\x12\x10tflite.profiling\x1a\x34tensorflow/lite/profiling/proto/profiling_info.proto\"_\n\x13ModelRuntimeDetails\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x34\n\tsubgraphs\x18\x02 \x03(\x0b\x32!.tflite.profiling.RuntimeSubgraph\"\xa9\x02\n\x0fRuntimeSubgraph\x12\x13\n\x0bsubgraph_id\x18\x01 \x01(\x05\x12%\n\x05\x65\x64ges\x18\x02 \x03(\x0b\x32\x16.tflite.profiling.Edge\x12%\n\x05nodes\x18\x03 \x03(\x0b\x32\x16.tflite.profiling.Node\x12\x1a\n\x0e\x65xecution_plan\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x45\n\rsubgraph_type\x18\x05 \x01(\x0e\x32..tflite.profiling.RuntimeSubgraph.SubgraphType\"P\n\x0cSubgraphType\x12\x14\n\x10UNKNOWN_SUBGRAPH\x10\x00\x12\x13\n\x0fTFLITE_SUBGRAPH\x10\x01\x12\x15\n\x11\x44\x45LEGATE_SUBGRAPH\x10\x02\"\xba\x02\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x12\n\x06inputs\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x07outputs\x18\x05 \x03(\x05\x42\x02\x10\x01\x12\x19\n\rintermediates\x18\x06 \x03(\x05\x42\x02\x10\x01\x12\x17\n\x0btemporaries\x18\x07 \x03(\x05\x42\x02\x10\x01\x12\x38\n\x0fop_profile_data\x18\n \x01(\x0b\x32\x1f.tflite.profiling.OpProfileData\x12\x46\n\x15\x64\x65legate_node_details\x18\x08 \x01(\x0b\x32%.tflite.profiling.DelegateNodeDetailsH\x00\x12\x1e\n\x14\x64\x65legated_to_node_id\x18\t \x01(\x05H\x00\x42\x0b\n\tnode_info\"R\n\x13\x44\x65legateNodeDetails\x12\x15\n\rdelegate_name\x18\x01 \x01(\t\x12$\n\x18tflite_node_ids_replaced\x18\x02 \x03(\x05\x42\x02\x10\x01\"\x81\x05\n\x04\x45\x64ge\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x32\n\tdata_type\x18\x03 \x01(\x0e\x32\x1f.tflite.profiling.Edge.DataType\x12\x11\n\x05shape\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x17\n\x0f\x61llocation_type\x18\x05 \x01(\t\x12\x36\n\x0blayout_type\x18\x06 \x01(\x0e\x32!.tflite.profiling.Edge.LayoutType\x12\x0c\n\x04size\x18\x07 
\x01(\x05\"\x85\x02\n\x08\x44\x61taType\x12\x10\n\x0cUNKNOWN_TYPE\x10\x00\x12\x0b\n\x07\x46LOAT32\x10\x01\x12\t\n\x05INT32\x10\x02\x12\t\n\x05UINT8\x10\x03\x12\t\n\x05INT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x08\n\x04\x42OOL\x10\x06\x12\t\n\x05INT16\x10\x07\x12\r\n\tCOMPLEX64\x10\x08\x12\x08\n\x04INT8\x10\t\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT64\x10\x0b\x12\x0e\n\nCOMPLEX128\x10\x0c\x12\n\n\x06UINT64\x10\r\x12\x0c\n\x08RESOURCE\x10\x0e\x12\x0b\n\x07VARIANT\x10\x0f\x12\n\n\x06UINT32\x10\x10\x12\n\n\x06UINT16\x10\x11\x12\x08\n\x04INT4\x10\x12\x12\x0c\n\x08\x42\x46LOAT16\x10\x13\"\xb0\x01\n\nLayoutType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SCALAR\x10\x01\x12\n\n\x06LINEAR\x10\x02\x12\x06\n\x02HW\x10\x03\x12\x07\n\x03\x43HW\x10\x04\x12\x07\n\x03HWC\x10\x05\x12\x08\n\x04OIHW\x10\x06\x12\x08\n\x04OHWI\x10\x07\x12\x08\n\x04IHWO\x10\x08\x12\x08\n\x04IOHW\x10\t\x12\x08\n\x04\x42HWC\x10\n\x12\x08\n\x04HWDC\x10\x0b\x12\t\n\x05\x42HWDC\x10\x0c\x12\x07\n\x03HWD\x10\r\x12\t\n\x05OHWDI\x10\x0e\x12\x08\n\x04HWIO\x10\x0f\x42\x02P\x01')
|
| 18 |
+
|
| 19 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 20 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.lite.profiling.proto.model_runtime_info_pb2', globals())
|
| 21 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 22 |
+
|
| 23 |
+
DESCRIPTOR._options = None
|
| 24 |
+
DESCRIPTOR._serialized_options = b'P\001'
|
| 25 |
+
_RUNTIMESUBGRAPH.fields_by_name['execution_plan']._options = None
|
| 26 |
+
_RUNTIMESUBGRAPH.fields_by_name['execution_plan']._serialized_options = b'\020\001'
|
| 27 |
+
_NODE.fields_by_name['inputs']._options = None
|
| 28 |
+
_NODE.fields_by_name['inputs']._serialized_options = b'\020\001'
|
| 29 |
+
_NODE.fields_by_name['outputs']._options = None
|
| 30 |
+
_NODE.fields_by_name['outputs']._serialized_options = b'\020\001'
|
| 31 |
+
_NODE.fields_by_name['intermediates']._options = None
|
| 32 |
+
_NODE.fields_by_name['intermediates']._serialized_options = b'\020\001'
|
| 33 |
+
_NODE.fields_by_name['temporaries']._options = None
|
| 34 |
+
_NODE.fields_by_name['temporaries']._serialized_options = b'\020\001'
|
| 35 |
+
_DELEGATENODEDETAILS.fields_by_name['tflite_node_ids_replaced']._options = None
|
| 36 |
+
_DELEGATENODEDETAILS.fields_by_name['tflite_node_ids_replaced']._serialized_options = b'\020\001'
|
| 37 |
+
_EDGE.fields_by_name['shape']._options = None
|
| 38 |
+
_EDGE.fields_by_name['shape']._serialized_options = b'\020\001'
|
| 39 |
+
_MODELRUNTIMEDETAILS._serialized_start=132
|
| 40 |
+
_MODELRUNTIMEDETAILS._serialized_end=227
|
| 41 |
+
_RUNTIMESUBGRAPH._serialized_start=230
|
| 42 |
+
_RUNTIMESUBGRAPH._serialized_end=527
|
| 43 |
+
_RUNTIMESUBGRAPH_SUBGRAPHTYPE._serialized_start=447
|
| 44 |
+
_RUNTIMESUBGRAPH_SUBGRAPHTYPE._serialized_end=527
|
| 45 |
+
_NODE._serialized_start=530
|
| 46 |
+
_NODE._serialized_end=844
|
| 47 |
+
_DELEGATENODEDETAILS._serialized_start=846
|
| 48 |
+
_DELEGATENODEDETAILS._serialized_end=928
|
| 49 |
+
_EDGE._serialized_start=931
|
| 50 |
+
_EDGE._serialized_end=1572
|
| 51 |
+
_EDGE_DATATYPE._serialized_start=1132
|
| 52 |
+
_EDGE_DATATYPE._serialized_end=1393
|
| 53 |
+
_EDGE_LAYOUTTYPE._serialized_start=1396
|
| 54 |
+
_EDGE_LAYOUTTYPE._serialized_end=1572
|
| 55 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/profiling_info_pb2.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
| 3 |
+
# source: tensorflow/lite/profiling/proto/profiling_info.proto
|
| 4 |
+
"""Generated protocol buffer code."""
|
| 5 |
+
from google.protobuf.internal import builder as _builder
|
| 6 |
+
from google.protobuf import descriptor as _descriptor
|
| 7 |
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
| 8 |
+
from google.protobuf import symbol_database as _symbol_database
|
| 9 |
+
# @@protoc_insertion_point(imports)
|
| 10 |
+
|
| 11 |
+
_sym_db = _symbol_database.Default()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n4tensorflow/lite/profiling/proto/profiling_info.proto\x12\x10tflite.profiling\"\xa7\x01\n\x16\x42\x65nchmarkProfilingData\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12:\n\x0cinit_profile\x18\x02 \x01(\x0b\x32$.tflite.profiling.ModelProfilingData\x12=\n\x0fruntime_profile\x18\x03 \x01(\x0b\x32$.tflite.profiling.ModelProfilingData\"\x9c\x01\n\x12ModelProfilingData\x12\x42\n\x11subgraph_profiles\x18\x01 \x03(\x0b\x32\'.tflite.profiling.SubGraphProfilingData\x12\x42\n\x11\x64\x65legate_profiles\x18\x02 \x03(\x0b\x32\'.tflite.profiling.DelegateProfilingData\"\x80\x01\n\x15SubGraphProfilingData\x12\x15\n\rsubgraph_name\x18\x01 \x01(\t\x12\x16\n\x0esubgraph_index\x18\x02 \x01(\x05\x12\x38\n\x0fper_op_profiles\x18\x03 \x03(\x0b\x32\x1f.tflite.profiling.OpProfileData\"h\n\x15\x44\x65legateProfilingData\x12\x15\n\rdelegate_name\x18\x01 \x01(\t\x12\x38\n\x0fper_op_profiles\x18\x02 \x03(\x0b\x32\x1f.tflite.profiling.OpProfileData\"\x93\x01\n\x0fOpProfilingStat\x12\r\n\x05\x66irst\x18\x01 \x01(\x03\x12\x0c\n\x04last\x18\x02 \x01(\x03\x12\x0b\n\x03\x61vg\x18\x03 \x01(\x03\x12\x0e\n\x06stddev\x18\x04 \x01(\x02\x12\x10\n\x08variance\x18\x05 \x01(\x02\x12\x0b\n\x03min\x18\x06 \x01(\x03\x12\x0b\n\x03max\x18\x07 \x01(\x03\x12\x0b\n\x03sum\x18\x08 \x01(\x03\x12\r\n\x05\x63ount\x18\t \x01(\x03\"\xcf\x01\n\rOpProfileData\x12\x11\n\tnode_type\x18\x01 \x01(\t\x12\x41\n\x16inference_microseconds\x18\x02 \x01(\x0b\x32!.tflite.profiling.OpProfilingStat\x12\x31\n\x06mem_kb\x18\x03 \x01(\x0b\x32!.tflite.profiling.OpProfilingStat\x12\x14\n\x0ctimes_called\x18\x04 \x01(\x03\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x11\n\trun_order\x18\x06 \x01(\x03\x42\x02P\x01')
|
| 17 |
+
|
| 18 |
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
| 19 |
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.lite.profiling.proto.profiling_info_pb2', globals())
|
| 20 |
+
if _descriptor._USE_C_DESCRIPTORS == False:
|
| 21 |
+
|
| 22 |
+
DESCRIPTOR._options = None
|
| 23 |
+
DESCRIPTOR._serialized_options = b'P\001'
|
| 24 |
+
_BENCHMARKPROFILINGDATA._serialized_start=75
|
| 25 |
+
_BENCHMARKPROFILINGDATA._serialized_end=242
|
| 26 |
+
_MODELPROFILINGDATA._serialized_start=245
|
| 27 |
+
_MODELPROFILINGDATA._serialized_end=401
|
| 28 |
+
_SUBGRAPHPROFILINGDATA._serialized_start=404
|
| 29 |
+
_SUBGRAPHPROFILINGDATA._serialized_end=532
|
| 30 |
+
_DELEGATEPROFILINGDATA._serialized_start=534
|
| 31 |
+
_DELEGATEPROFILINGDATA._serialized_end=638
|
| 32 |
+
_OPPROFILINGSTAT._serialized_start=641
|
| 33 |
+
_OPPROFILINGSTAT._serialized_end=788
|
| 34 |
+
_OPPROFILEDATA._serialized_start=791
|
| 35 |
+
_OPPROFILEDATA._serialized_end=998
|
| 36 |
+
# @@protoc_insertion_point(module_scope)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (198 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/analyzer.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/conversion_metadata_schema_py_generated.cpython-310.pyc
ADDED
|
Binary file (17 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert.cpython-310.pyc
ADDED
|
Binary file (36 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_phase.cpython-310.pyc
ADDED
|
Binary file (5.78 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_saved_model.cpython-310.pyc
ADDED
|
Binary file (6.17 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/interpreter.cpython-310.pyc
ADDED
|
Binary file (36.1 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite.cpython-310.pyc
ADDED
|
Binary file (95.1 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite_constants.cpython-310.pyc
ADDED
|
Binary file (1.77 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/op_hint.cpython-310.pyc
ADDED
|
Binary file (40.1 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_py_generated.cpython-310.pyc
ADDED
|
Binary file (480 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_util.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_convert.cpython-310.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_keras_util.cpython-310.pyc
ADDED
|
Binary file (6.73 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (215 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.pyi
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
|
| 16 |
+
def ModelAnalyzer(arg0: str, arg1: bool, arg2: bool) -> str: ...
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ad6e053502d3f80aa92e5b1fb1c61236a2cb3c34c1f500e3ccce89d112362d1b
|
| 3 |
+
size 2573672
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__init__.py
ADDED
|
File without changes
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (208 Bytes). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/authoring.cpython-310.pyc
ADDED
|
Binary file (9.41 kB). View file
|
|
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/authoring.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""TensorFlow Authoring tool package for TFLite compatibility.
|
| 16 |
+
|
| 17 |
+
WARNING: The package is experimental and subject to change.
|
| 18 |
+
|
| 19 |
+
This package provides a way to check TFLite compatibility at model authoring
|
| 20 |
+
time.
|
| 21 |
+
|
| 22 |
+
Example:
|
| 23 |
+
@tf.lite.experimental.authoring.compatible
|
| 24 |
+
@tf.function(input_signature=[
|
| 25 |
+
tf.TensorSpec(shape=[None], dtype=tf.float32)
|
| 26 |
+
])
|
| 27 |
+
def f(x):
|
| 28 |
+
return tf.cosh(x)
|
| 29 |
+
|
| 30 |
+
result = f(tf.constant([0.0]))
|
| 31 |
+
|
| 32 |
+
> COMPATIBILITY WARNING: op 'tf.Cosh' require(s) "Select TF Ops" for model
|
| 33 |
+
> conversion for TensorFlow Lite.
|
| 34 |
+
> Op: tf.Cosh
|
| 35 |
+
> - tensorflow/python/framework/op_def_library.py:xxx
|
| 36 |
+
> - tensorflow/python/ops/gen_math_ops.py:xxx
|
| 37 |
+
> - simple_authoring.py:xxx
|
| 38 |
+
"""
|
| 39 |
+
import functools
|
| 40 |
+
from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2
|
| 41 |
+
# pylint: disable=g-import-not-at-top
|
| 42 |
+
from tensorflow.lite.python import convert
|
| 43 |
+
from tensorflow.lite.python import lite
|
| 44 |
+
from tensorflow.python.util.tf_export import tf_export as _tf_export
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
_CUSTOM_OPS_HDR = "Custom ops: "
|
| 48 |
+
_TF_OPS_HDR = "TF Select ops: "
|
| 49 |
+
_AUTHORING_ERROR_HDR = "COMPATIBILITY ERROR"
|
| 50 |
+
_AUTHORING_WARNING_HDR = "COMPATIBILITY WARNING"
|
| 51 |
+
_FUNC_GRAPH_SRC_PATH = "tensorflow/python/framework/func_graph.py"
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class CompatibilityError(Exception):
  """Error raised when a TFLite compatibility issue is detected."""
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class _Compatible:
  """A decorator class to check TFLite compatibility created by `lite.experimental.authoring.compatible`."""

  def __init__(self,
               target,
               converter_target_spec=None,
               converter_allow_custom_ops=None,
               raise_exception=False):
    """Initializes the decorator object.

    Object attributes:
    - _func : the decorated function.
    - _obj_func : for a class method, the bound function used to supply the
      `self` instance as the first argument.
    - _verified : whether the compatibility check has already run.

    Args:
      target: decorated function.
      converter_target_spec: target_spec of TFLite converter parameter.
      converter_allow_custom_ops: allow_custom_ops of TFLite converter
        parameter.
      raise_exception: whether to raise an exception on compatibility issues.
        Use get_compatibility_log() to check details.
    """
    functools.update_wrapper(self, target)
    self._func = target
    self._obj_func = None
    self._verified = False
    self._log_messages = []
    self._raise_exception = raise_exception
    self._converter_target_spec = converter_target_spec
    self._converter_allow_custom_ops = converter_allow_custom_ops

  def __get__(self, instance, cls):
    """A Python descriptor interface."""
    # Bind the decorated function so `self` of the owning class is supplied.
    self._obj_func = self._func.__get__(instance, cls)
    return self

  def _get_func(self):
    """Returns the decorated function object.

    For a class method, uses self._obj_func to provide the `self` instance.
    """
    if self._obj_func is not None:
      return self._obj_func
    else:
      return self._func

  def __call__(self, *args, **kwargs):  # pylint: disable=g-doc-args
    """Calls the decorated function object.

    Also verifies if the function is compatible with TFLite on first call.

    Returns:
      A execution result of the decorated function.
    """
    if not self._verified:
      model = self._get_func()
      concrete_func = model.get_concrete_function(*args, **kwargs)
      converter = lite.TFLiteConverterV2.from_concrete_functions(
          [concrete_func], model)
      # Set provided converter parameters.
      if self._converter_target_spec is not None:
        converter.target_spec = self._converter_target_spec
      if self._converter_allow_custom_ops is not None:
        converter.allow_custom_ops = self._converter_allow_custom_ops
      try:
        converter.convert()
      except convert.ConverterError as err:
        self._decode_error(err)
      finally:
        # Mark verified even when conversion fails so the (expensive) check
        # runs at most once per decorated function.
        self._verified = True

    return self._get_func()(*args, **kwargs)

  def get_concrete_function(self, *args, **kwargs):
    """Returns a concrete function of the decorated function."""
    return self._get_func().get_concrete_function(*args, **kwargs)

  def _get_location_string(self, location):
    """Dumps the call stack of a ConverterError.errors.location as a string."""
    # The location type is a property of the whole location, so evaluate it
    # once rather than re-checking it for every stack frame.
    is_callsite = (
        location.type ==
        converter_error_data_pb2.ConverterErrorData.CALLSITELOC)
    callstack = []
    for single_call in reversed(location.call):
      if is_callsite:
        callstack.append(
            f" - {single_call.source.filename}:{single_call.source.line}")
      else:
        callstack.append(str(single_call))
    return "\n".join(callstack)

  def _dump_error_details(self, ops, locations):
    """Logs each op in `ops` with its matching location's call stack."""
    # `ops` and `locations` are built in lockstep, so iterate them together.
    for op, location in zip(ops, locations):
      callstack_dump = self._get_location_string(location)
      err_string = f"Op: {op}\n{callstack_dump}\n"
      self._log(err_string)

  def _decode_error_legacy(self, err):
    """Parses the given legacy ConverterError for OSS."""
    for line in str(err).splitlines():
      # Check custom op usage error.
      if line.startswith(_CUSTOM_OPS_HDR):
        custom_ops = line[len(_CUSTOM_OPS_HDR):]
        err_string = (
            f"{_AUTHORING_ERROR_HDR}: op '{custom_ops}' is(are) not natively "
            "supported by TensorFlow Lite. You need to provide a custom "
            "operator. https://www.tensorflow.org/lite/guide/ops_custom")
        self._log(err_string)
      # Check TensorFlow op usage error.
      elif line.startswith(_TF_OPS_HDR):
        tf_ops = line[len(_TF_OPS_HDR):]
        err_string = (
            f"{_AUTHORING_WARNING_HDR}: op '{tf_ops}' require(s) \"Select TF "
            "Ops\" for model conversion for TensorFlow Lite. "
            "https://www.tensorflow.org/lite/guide/ops_select")
        self._log(err_string)

  def _decode_converter_error(self, err):
    """Parses the given ConverterError which has detailed error information."""
    custom_ops = []
    custom_ops_location = []
    tf_ops = []
    tf_ops_location = []
    gpu_not_compatible_ops = []
    # Iterate with a distinct name so the `err` parameter is not shadowed.
    for error in err.errors:
      # Check custom op usage error.
      if error.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_NEEDS_CUSTOM_OPS:
        custom_ops.append(error.operator.name)
        custom_ops_location.append(error.location)
      # Check TensorFlow op usage error.
      elif error.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_NEEDS_FLEX_OPS:
        tf_ops.append(error.operator.name)
        tf_ops_location.append(error.location)
      # Check GPU delegate compatibility error.
      elif error.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_GPU_NOT_COMPATIBLE:
        gpu_not_compatible_ops.append(error.operator.name)
        # Log the first line of ConverterError.errors.error_message only
        # since the second line is "Error code: xxxx".
        self._log(error.error_message.splitlines()[0])
        self._log(self._get_location_string(error.location) + "\n")
      else:
        # Log other errors.
        self._log(f"{_AUTHORING_ERROR_HDR}: {error.error_message}")
        self._log(self._get_location_string(error.location) + "\n")

    if custom_ops:
      custom_ops_str = ", ".join(sorted(custom_ops))
      err_string = (
          f"{_AUTHORING_ERROR_HDR}: op '{custom_ops_str}' is(are) not natively "
          "supported by TensorFlow Lite. You need to provide a custom "
          "operator. https://www.tensorflow.org/lite/guide/ops_custom")
      self._log(err_string)
      self._dump_error_details(custom_ops, custom_ops_location)

    if tf_ops:
      tf_ops_str = ", ".join(sorted(tf_ops))
      err_string = (
          f"{_AUTHORING_WARNING_HDR}: op '{tf_ops_str}' require(s) \"Select TF"
          " Ops\" for model conversion for TensorFlow Lite. "
          "https://www.tensorflow.org/lite/guide/ops_select")
      self._log(err_string)
      self._dump_error_details(tf_ops, tf_ops_location)

    if gpu_not_compatible_ops:
      not_compatible_ops_str = ", ".join(sorted(gpu_not_compatible_ops))
      err_string = (
          f"{_AUTHORING_WARNING_HDR}: op '{not_compatible_ops_str}' aren't "
          "compatible with TensorFlow Lite GPU delegate. "
          "https://www.tensorflow.org/lite/performance/gpu")
      self._log(err_string)

  def _decode_error(self, err):
    """Parses the given ConverterError and generates compatibility warnings."""
    if hasattr(err, "errors"):
      self._decode_converter_error(err)
    else:
      self._decode_error_legacy(err)

    if self._raise_exception and self._log_messages:
      raise CompatibilityError(f"CompatibilityException at {repr(self._func)}")

  def _log(self, message):
    """Logs and prints an authoring warning / error message."""
    self._log_messages.append(message)
    print(message)

  def get_compatibility_log(self):
    """Returns list of compatibility log messages.

    WARNING: This method should only be used for unit tests.

    Returns:
      The list of log messages by the recent compatibility check.
    Raises:
      RuntimeError: when the compatibility was NOT checked.
    """
    if not self._verified:
      raise RuntimeError("target compatibility isn't verified yet")
    return self._log_messages
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@_tf_export("lite.experimental.authoring.compatible")
def compatible(target=None, converter_target_spec=None, **kwargs):
  """Wraps `tf.function` into a callable function with TFLite compatibility checking.

  Example:

  ```python
  @tf.lite.experimental.authoring.compatible
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None], dtype=tf.float32)
  ])
  def f(x):
    return tf.cosh(x)

  result = f(tf.constant([0.0]))
  # COMPATIBILITY WARNING: op 'tf.Cosh' require(s) "Select TF Ops" for model
  # conversion for TensorFlow Lite.
  # Op: tf.Cosh
  #   - tensorflow/python/framework/op_def_library.py:748
  #   - tensorflow/python/ops/gen_math_ops.py:2458
  #   - <stdin>:6
  ```

  WARNING: Experimental interface, subject to change.

  Args:
    target: A `tf.function` to decorate.
    converter_target_spec : target_spec of TFLite converter parameter.
    **kwargs: The keyword arguments of the decorator class _Compatible.

  Returns:
    A callable object of `tf.lite.experimental.authoring._Compatible`.
  """
  def _decorate(func):
    # Single construction point shared by both decorator usages.
    return _Compatible(func, converter_target_spec, **kwargs)

  if target is None:
    # Called with arguments, e.g. @compatible(converter_target_spec=...):
    # return the decorator to be applied to the function later.
    return _decorate
  return _decorate(target)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert.py
ADDED
|
@@ -0,0 +1,1250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Converts a frozen graph into a TFLite FlatBuffer."""
|
| 16 |
+
|
| 17 |
+
import distutils.spawn
|
| 18 |
+
import enum
|
| 19 |
+
import hashlib
|
| 20 |
+
import os as _os
|
| 21 |
+
import platform as _platform
|
| 22 |
+
import subprocess as _subprocess
|
| 23 |
+
import tempfile as _tempfile
|
| 24 |
+
from typing import Optional
|
| 25 |
+
import warnings
|
| 26 |
+
|
| 27 |
+
from tensorflow.compiler.mlir.lite import converter_flags_pb2 as _conversion_flags_pb2
|
| 28 |
+
from tensorflow.compiler.mlir.lite import model_flags_pb2 as _model_flags_pb2
|
| 29 |
+
from tensorflow.compiler.mlir.lite import types_pb2 as _types_pb2
|
| 30 |
+
from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2
|
| 31 |
+
from tensorflow.compiler.mlir.lite.python import wrap_converter
|
| 32 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2
|
| 33 |
+
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_options_pb2 as quant_opts_pb2
|
| 34 |
+
from tensorflow.lite.python import lite_constants
|
| 35 |
+
from tensorflow.lite.python import util
|
| 36 |
+
from tensorflow.lite.python.convert_phase import Component
|
| 37 |
+
from tensorflow.lite.python.convert_phase import convert_phase
|
| 38 |
+
from tensorflow.lite.python.convert_phase import ConverterError
|
| 39 |
+
from tensorflow.lite.python.convert_phase import SubComponent
|
| 40 |
+
from tensorflow.lite.python.metrics.wrapper import metrics_wrapper as _metrics_wrapper
|
| 41 |
+
from tensorflow.lite.tools import flatbuffer_utils
|
| 42 |
+
from tensorflow.python.framework import dtypes
|
| 43 |
+
from tensorflow.python.framework import tensor_shape
|
| 44 |
+
from tensorflow.python.platform import resource_loader as _resource_loader
|
| 45 |
+
from tensorflow.python.util import deprecation
|
| 46 |
+
from tensorflow.python.util.tf_export import tf_export as _tf_export
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _is_quantized_input_stats_required(
    conversion_flags: _conversion_flags_pb2.ConverterFlags,
) -> bool:
  """Checks if the `quantized_input_stats` flag is required for conversion.

  Args:
    conversion_flags: A protocol buffer describing the conversion process.

  Returns:
    True, if the `inference_type` or the `inference_input_type` is a quantized
    type and it is not post training quantization, else False.
  """
  quantized_types = (
      _types_pb2.QUANTIZED_UINT8,
      _types_pb2.QUANTIZED_INT8,
  )
  # Stats are only needed for quantized inference types outside of the
  # post-training-quantization path.
  uses_quantized_type = (
      conversion_flags.inference_type in quantized_types
      or conversion_flags.inference_input_type in quantized_types
  )
  return uses_quantized_type and not conversion_flags.post_training_quantize
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def convert_tensor_tf_type_to_tflite_type(
    tf_type: dtypes.DType, usage: str = ""
) -> _types_pb2.IODataType:
  """Convert tensor type from tf type to tflite type.

  Args:
    tf_type: TensorFlow type.
    usage: Text describing the reason for invoking this function.

  Raises:
    ValueError: If `tf_type` is unsupported.

  Returns:
    tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.
  """
  tf_to_tflite = {
      dtypes.float16: _types_pb2.FLOAT16,
      dtypes.float32: _types_pb2.FLOAT,
      dtypes.float64: _types_pb2.FLOAT64,
      dtypes.int8: _types_pb2.INT8,
      dtypes.int16: _types_pb2.INT16,
      dtypes.uint16: _types_pb2.UINT16,
      dtypes.int32: _types_pb2.INT32,
      dtypes.int64: _types_pb2.INT64,
      dtypes.uint8: _types_pb2.UINT8,
      dtypes.uint32: _types_pb2.UINT32,
      dtypes.uint64: _types_pb2.UINT64,
      dtypes.string: _types_pb2.STRING,
      dtypes.bool: _types_pb2.BOOL,
      dtypes.complex64: _types_pb2.COMPLEX64,
      dtypes.complex128: _types_pb2.COMPLEX128,
  }
  result = tf_to_tflite.get(tf_type)
  if result is None:
    # `usage` lets the caller tell the user which flag the bad type came from.
    raise ValueError(
        f"Unsupported TensorFlow type `{tf_type}` provided for the {usage}"
    )
  return result
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# Only a few restricted tensor types are allowed for explicitly setting
|
| 114 |
+
# inference/input/output types.
|
| 115 |
+
# Only a few restricted tensor types are allowed for explicitly setting
# inference/input/output types.
def convert_inference_tf_type_to_tflite_type(
    tf_type: dtypes.DType, usage: str = ""
) -> _types_pb2.IODataType:
  """Convert inference type from tf type to tflite type.

  Args:
    tf_type: TensorFlow type.
    usage: Text describing the reason for invoking this function.

  Raises:
    ValueError: If `tf_type` is unsupported.

  Returns:
    tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto.
  """
  # Inference types are restricted to float32 and the quantized integer types.
  inference_type_map = {
      dtypes.float32: _types_pb2.FLOAT,
      dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
      dtypes.int8: _types_pb2.QUANTIZED_INT8,
      dtypes.int16: _types_pb2.QUANTIZED_INT16,
  }
  result = inference_type_map.get(tf_type)
  if result is None:
    raise ValueError(
        f"Unsupported TensorFlow type `{tf_type}` provided for the {usage}"
    )
  return result
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
# Find the deprecated conversion binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has the tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Direct API use: no external binary is needed, leave the path empty.
  _deprecated_conversion_binary = ""
else:
  # Bazel layout: resolve the binary relative to this file's data files.
  _deprecated_conversion_binary = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos"
  )
  if not _os.path.exists(_deprecated_conversion_binary):
    # Pip layout: fall back to the console_scripts entry point on PATH.
    _deprecated_conversion_binary = "toco_from_protos"
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _try_convert_to_unicode(output):
|
| 159 |
+
if output is None:
|
| 160 |
+
return ""
|
| 161 |
+
|
| 162 |
+
if isinstance(output, bytes):
|
| 163 |
+
try:
|
| 164 |
+
return output.decode("utf-8")
|
| 165 |
+
except UnicodeDecodeError:
|
| 166 |
+
pass
|
| 167 |
+
return output
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """

  # Convert model using TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"

  # Convert model using TensorFlow ops. Not all TensorFlow ops are available.
  # WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"

  # Convert model using only TensorFlow Lite quantized int8 operations;
  # operations without a quantized implementation cause an error.
  TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"

  # Convert model using only TensorFlow Lite operations with quantized int8
  # weights, int16 activations and int64 bias; operations without a quantized
  # implementation cause an error. This mode suits models for
  # super-resolution, audio signal processing or image de-noising: accuracy
  # improves significantly with only a slight model-size increase.
  # WARNING: These ops are experimental, CPU-only, and not optimized for
  # production.
  EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = (
      "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
  )

  # Convert model using only StableHLO ops. This option cannot be combined
  # with other OpsSets. The feature is in early development: runtime
  # execution of StableHLO ops is still being implemented and the
  # serialization format is not stabilized yet.
  EXPERIMENTAL_STABLEHLO_OPS = "EXPERIMENTAL_STABLEHLO_OPS"

  def __str__(self):
    # Render the member as its raw string value (used in flags and logs).
    return str(self.value)

  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return list(map(str, OpsSet))
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE)
def mlir_quantize(
    input_data_str,
    disable_per_channel=False,
    fully_quantize=False,
    inference_type=_types_pb2.QUANTIZED_INT8,
    input_data_type=dtypes.float32,
    output_data_type=dtypes.float32,
    enable_numeric_verify=False,
    enable_whole_model_verify=False,
    denylisted_ops=None,
    denylisted_nodes=None,
    enable_variable_quantization=False,
    disable_per_channel_for_dense_layers=False,
    debug_options_str="",
):
  """Quantize `input_data_str` with calibration results.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model with
      calibration results).
    disable_per_channel: Bool indicating whether to do per-channel or per-tensor
      quantization
    fully_quantize: Bool indicating whether to fully quantize the model. Besides
      model body, the input/output will be quantized as well.
    inference_type: Data type for the activations. The default value is int8.
    input_data_type: Data type for the inputs. The default value is float32.
    output_data_type: Data type for the outputs. The default value is float32.
    enable_numeric_verify: Experimental. Subject to change. Bool indicating
      whether to add NumericVerify ops into the debug mode quantized model.
    enable_whole_model_verify: Experimental. Subject to change. Bool indicating
      whether to add verification for layer by layer, or on whole model. When
      disabled (per-layer) float and quantized ops will be run from same input
      (output of previous quantized layer). When enabled, float and quantized
      ops will run with respective float and quantized output of previous ops.
    denylisted_ops: Experimental. Subject to change. Set of ops to denylist.
    denylisted_nodes: Experimental. Subject to change. Set of notes to denylist.
    enable_variable_quantization: Experimental. Subject to change. Bool
      indicating whether to enable quantization of the residual variables
      remaining after the variable freezing pass.
    disable_per_channel_for_dense_layers: Bool indicating whether to do
      per-channel or per-tensor quantization in Fully Connected layers. Default
      value is False meaning per-channel quantization is enabled.
    debug_options_str: Serialized proto describing TFLite converter debug
      options, see `debug/debug_options.proto`.

  Returns:
    Quantized model in serialized form (e.g. a TFLITE model) with floating-point
    inputs and outputs.
  """
  # Translate the TF dtypes of the model I/O to their TFLite equivalents
  # before handing everything to the native wrapper.
  tflite_input_type = convert_tensor_tf_type_to_tflite_type(input_data_type)
  tflite_output_type = convert_tensor_tf_type_to_tflite_type(output_data_type)
  return wrap_converter.wrapped_experimental_mlir_quantize(
      input_data_str,
      disable_per_channel,
      fully_quantize,
      inference_type,
      tflite_input_type,
      tflite_output_type,
      enable_numeric_verify,
      enable_whole_model_verify,
      denylisted_ops,
      denylisted_nodes,
      enable_variable_quantization,
      disable_per_channel_for_dense_layers,
      debug_options_str,
  )
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY)
def mlir_sparsify(input_data_str):
  """Encode sparse tensors in `input_data_str` using the proper sparse format.

  Args:
    input_data_str: Serialized model bytes (e.g. a TFLITE flatbuffer).

  Returns:
    The sparsified model in serialized form (e.g. a TFLITE flatbuffer).
  """
  # Delegate directly to the C++ sparsifier behind the pybind wrapper.
  sparsified_model = wrap_converter.wrapped_experimental_mlir_sparsify(
      input_data_str
  )
  return sparsified_model
|
| 300 |
+
def register_custom_opdefs(custom_opdefs_list):
  """Add the given custom-op OpDefs to the TensorFlow global op registry.

  Args:
    custom_opdefs_list: String representing the custom ops OpDefs that are
      included in the GraphDef.

  Returns:
    True if the registration is successfully completed.
  """
  # The pybind wrapper performs the actual registration in the C++ runtime.
  registered = wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)
  return registered
| 312 |
+
|
| 313 |
+
def convert(
    model_flags: _model_flags_pb2.ModelFlags,
    conversion_flags: _conversion_flags_pb2.ConverterFlags,
    input_data_str: Optional[str] = None,
    debug_info_str: Optional[str] = None,
    enable_mlir_converter: bool = True,
):
  """Converts `input_data_str` to a TFLite model.

  Args:
    model_flags: Proto describing model properties, see `model_flags.proto`.
    conversion_flags: Proto describing conversion properties, see
      `compiler/mlir/lite/converter_flags.proto`. NOTE: may be mutated in
      place — on a specific conversion failure this function sets
      `guarantee_all_funcs_one_use = True` before retrying (see below).
    input_data_str: Input data in serialized form (e.g. a graphdef is common, or
      it can be hlo text or proto)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information.
    enable_mlir_converter: Enables MLIR-based conversion.

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).
  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # Historically, deprecated conversion failures would trigger a crash, so we
  # attempt to run the converter out-of-process. The current MLIR conversion
  # pipeline surfaces errors instead, and can be safely run in-process.
  if enable_mlir_converter or not _deprecated_conversion_binary:
    try:
      return wrap_converter.wrapped_convert(
          model_flags.SerializeToString(),
          conversion_flags.SerializeToString(),
          input_data_str,
          debug_info_str,
          enable_mlir_converter,
      )
    except Exception as e:
      # Wrap whatever the pybind layer raised, then attach every structured
      # error collected during the failed conversion attempt.
      converter_error = ConverterError(str(e))

      for error_data in _metrics_wrapper.retrieve_collected_errors():
        converter_error.append_error(error_data)
        # Seldom we encounter the case where an unsupported
        # `StatefulPartitionedCallOp` is not inlined and remains in the final
        # IR. If this occurs we can set `guarantee_all_funcs_one_use` and retry.
        # This makes the converter copy functions definitions called by
        # multiple StatefulPartitionedCall, thus allowing them to be properly
        # inlined.
        if (
            error_data.error_code
            == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR
            and not conversion_flags.guarantee_all_funcs_one_use
        ):
          # The flag check above guarantees this retry happens at most once:
          # the recursive call sees `guarantee_all_funcs_one_use == True`.
          conversion_flags.guarantee_all_funcs_one_use = True
          return convert(
              model_flags,
              conversion_flags,
              input_data_str,
              debug_info_str,
              enable_mlir_converter,
          )
      raise converter_error

  # Legacy path: run the deprecated converter binary out-of-process.
  return _run_deprecated_conversion_binary(
      model_flags.SerializeToString(),
      conversion_flags.SerializeToString(),
      input_data_str,
      debug_info_str,
  )
| 385 |
+
|
| 386 |
+
@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL,
    SubComponent.CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER,
)
def _run_deprecated_conversion_binary(
    model_flags_str, conversion_flags_str, input_data_str, debug_info_str=None
):
  """Convert `input_data_str` using deprecated conversion binary.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `model_flags.proto`.
    conversion_flags_str: Serialized proto describing TFLite converter
      properties, see `compiler/mlir/lite/converter_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information. (default None)

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).
  Raises:
    ConverterError: When cannot find the deprecated conversion binary.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # Local import: `distutils` (previously used via
  # `distutils.spawn.find_executable`) was removed in Python 3.12;
  # `shutil.which` has the same "path or None" semantics.
  import shutil

  if shutil.which(_deprecated_conversion_binary) is None:
    raise ConverterError("""Could not find `toco_from_protos` binary, make sure
your virtualenv bin directory or pip local bin directory is in your path.
In particular, if you have installed TensorFlow with --user, make sure you
add the install directory to your path.

For example:
Linux: export PATH=$PATH:~/.local/bin/
Mac: export PATH=$PATH:~/Library/Python/<version#>/bin

Alternative, use virtualenv.""")
  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  conversion_filename: Optional[str] = None
  model_filename: Optional[str] = None
  input_filename: Optional[str] = None
  output_filename: Optional[str] = None
  try:
    # Build all input files
    with (
        _tempfile.NamedTemporaryFile(delete=False) as fp_conversion,
        _tempfile.NamedTemporaryFile(delete=False) as fp_model,
        _tempfile.NamedTemporaryFile(delete=False) as fp_input,
        _tempfile.NamedTemporaryFile(delete=False) as fp_debug,
    ):
      conversion_filename = fp_conversion.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      debug_filename = fp_debug.name

      fp_model.write(model_flags_str)
      fp_conversion.write(conversion_flags_str)
      fp_input.write(input_data_str)
      debug_info_str = debug_info_str if debug_info_str else ""
      # The debug info may arrive as either `str` or `bytes`; the temp file
      # is opened in binary mode, so encode `str` before writing to avoid
      # "TypeError: a bytes-like object is required, not 'str'".
      if not isinstance(debug_info_str, bytes):
        fp_debug.write(debug_info_str.encode("utf-8"))
      else:
        fp_debug.write(debug_info_str)

    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name

    # Run the converter binary. Pass the argument vector directly with
    # shell=False so that temp-file paths containing spaces or shell
    # metacharacters are handled correctly on every platform (the previous
    # `" ".join(cmd)` + shell=True form broke on such paths).
    cmd = [
        _deprecated_conversion_binary,
        model_filename,
        conversion_filename,
        input_filename,
        output_filename,
        "--debug_proto_file={}".format(debug_filename),
    ]
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmd,
        shell=False,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,  # Merge stderr into stdout.
        close_fds=not is_windows,
    )
    # stderr was redirected into stdout above, so the second element of the
    # communicate() tuple is always None — ignore it instead of printing it.
    stdout, _ = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      raise ConverterError("See console for info.\n%s\n" % stdout)
  finally:
    # Must manually cleanup files.
    for filename in [
        conversion_filename,
        input_filename,
        model_filename,
        output_filename,
    ]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        # TypeError covers filenames that were never assigned (still None).
        pass
| 503 |
+
|
| 504 |
+
def build_model_flags(
    change_concat_input_ranges=False,
    allow_nonexistent_arrays=False,
    saved_model_dir=None,
    saved_model_version=0,
    saved_model_tags=None,
    saved_model_exported_names=None,
    **_,
):
  """Builds the `ModelFlags` protocol buffer from keyword parameters.

  Args:
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    saved_model_dir: Filepath of the saved model to be converted. This value
      will be non-empty only when the saved model import path will be used.
      Otherwise, the graph def-based conversion will be processed.
    saved_model_version: SavedModel file format version of the saved model file
      to be converted. This value will be set only when the SavedModel import
      path will be used.
    saved_model_tags: Set of string saved model tags, formatted in the
      comma-separated value. This value will be set only when the SavedModel
      import path will be used.
    saved_model_exported_names: Names to be exported (default: export all) when
      the saved model import path is on. This value will be set only when the
      SavedModel import path will be used.

  Returns:
    model_flags: protocol buffer describing the model.
  """
  flags = _model_flags_pb2.ModelFlags()
  flags.change_concat_input_ranges = change_concat_input_ranges
  flags.allow_nonexistent_arrays = allow_nonexistent_arrays
  # SavedModel directory is optional; skip the field when not provided.
  if saved_model_dir:
    flags.saved_model_dir = saved_model_dir
  # Version is always recorded, even for the graph-def conversion path.
  flags.saved_model_version = saved_model_version
  if saved_model_tags:
    flags.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    flags.saved_model_exported_names.extend(saved_model_exported_names)
  return flags
|
| 549 |
+
|
| 550 |
+
def build_conversion_flags(
    inference_type=dtypes.float32,
    inference_input_type=None,
    input_format=lite_constants.TENSORFLOW_GRAPHDEF,
    output_format=lite_constants.TFLITE,
    default_ranges_stats=None,
    drop_control_dependency=True,
    reorder_across_fake_quant=False,
    allow_custom_ops=False,
    post_training_quantize=False,
    quantize_to_float16=False,
    dump_graphviz_dir=None,
    dump_graphviz_video=False,
    target_ops=None,
    conversion_summary_dir=None,
    select_user_tf_ops=None,
    allow_all_select_tf_ops=False,
    enable_tflite_resource_variables=True,
    unfold_batchmatmul=False,
    legalize_custom_tensor_list_ops=False,
    lower_tensor_list_ops=True,
    default_to_single_batch_in_tensor_list_ops=False,
    accumulation_type=None,
    allow_bfloat16=False,
    unfold_large_splat_constant=False,
    supported_backends=None,
    disable_per_channel_quantization=False,
    enable_mlir_dynamic_range_quantizer=False,
    tf_quantization_mode=None,
    disable_infer_tensor_range=False,
    use_fake_quant_num_bits=False,
    enable_dynamic_update_slice=False,
    preserve_assert_op=False,
    guarantee_all_funcs_one_use=False,
    enable_mlir_variable_quantization=False,
    disable_fuse_mul_and_fc=False,
    quantization_options: Optional[quant_opts_pb2.QuantizationOptions] = None,
    ir_dump_dir=None,
    ir_dump_pass_regex=None,
    ir_dump_func_regex=None,
    enable_timing=None,
    print_ir_before=None,
    print_ir_after=None,
    print_ir_module_scope=None,
    elide_elementsattrs_if_larger=None,
    quantization_config: Optional[
        quantization_config_pb2.QuantizationConfig
    ] = None,
    use_buffer_offset=False,
    reduce_type_precision=False,
    qdq_conversion_mode=None,
    disable_per_channel_quantization_for_dense_layers=False,
    enable_composite_direct_lowering=False,
    model_origin_framework=lite_constants.UNSET,
    canonicalizing_inf_as_min_max_float=True,
    **_,
):
  """Builds protocol buffer describing a conversion of a model.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read. (default TENSORFLOW_GRAPHDEF, must be in
      {TENSORFLOW_GRAPHDEF})
    output_format: Output file format. (default TFLITE, must be in {TFLITE,
      GRAPHVIZ_DOT})
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False) If
      quantization_options is set, all quantization arg will be ignored.
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    conversion_summary_dir: A string, the path to the generated conversion logs.
    select_user_tf_ops: List of user's defined TensorFlow ops need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.
    allow_all_select_tf_ops: If True, automatically add all TF ops (including
      custom TF ops) to the converted model as flex ops.
    enable_tflite_resource_variables: Experimental flag, subject to change.
      Enables conversion of resource variables. (default False)
    unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of
      tfl.fully_connected ops. If not, translate to tfl.batch_matmul.
    legalize_custom_tensor_list_ops: Whether to legalize `tf.TensorList*` ops to
      tfl custom if they can all be supported.
    lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If
      not, use Flex tensor list ops.
    default_to_single_batch_in_tensor_list_ops: Whether to force to use batch
      size one when the tensor list ops has the unspecified batch size.
    accumulation_type: Data type of the accumulators in quantized inference.
      Typically used for float16 quantization and is either fp16 or fp32.
    allow_bfloat16: Whether the converted model supports reduced precision
      inference with the bfloat16 type.
    unfold_large_splat_constant: Whether to unfold large splat constant tensors
      in the flatbuffer model to reduce size.
    supported_backends: List of TFLite backends which needs to check
      compatibility.
    disable_per_channel_quantization: Disable per-channel quantized weights for
      dynamic range quantization. Only per-tensor quantization will be used.
    enable_mlir_dynamic_range_quantizer: Enable MLIR dynamic range quantization.
      If False, the old converter dynamic range quantizer is used.
    tf_quantization_mode: Indicates the mode of TF Quantization when the output
      model is used for TF Quantization.
    disable_infer_tensor_range: Disable infering tensor ranges.
    use_fake_quant_num_bits: Allow quantization parameters to be calculated from
      num_bits attribute.
    enable_dynamic_update_slice: Enable to convert to DynamicUpdateSlice op.
      (default: False).
    preserve_assert_op: Whether to preserve `TF::AssertOp` (default: False).
    guarantee_all_funcs_one_use: Whether to clone functions so that each
      function only has a single use. This option will be helpful if the
      conversion fails when the `PartitionedCall` or `StatefulPartitionedCall`
      can't be properly inlined (default: False).
    enable_mlir_variable_quantization: Enable MLIR variable quantization. There
      is a variable freezing pass, but some variables may not be fully frozen by
      it. This flag enables quantization of those residual variables in the MLIR
      graph.
    disable_fuse_mul_and_fc: Disable fusing input multiplication with
      fullyconnected operations. Useful when quantizing weights.
    quantization_options: [Deprecated] Config to indicate quantization options
      of each components (ex: weight, bias, activation). This can be a preset
      method or a custom method, and allows finer, modular control. This option
      will override any other existing quantization flags. We plan on gradually
      migrating all quantization-related specs into this option.
    ir_dump_dir: A string specifying the target directory to output MLIR dumps
      produced during conversion. If populated, enables MLIR dumps.
    ir_dump_pass_regex: A string containing a regular expression for filtering
      the pass names to be dumped. Effective only if `ir_dump_dir` is populated.
    ir_dump_func_regex: A string containing a regular expression for filtering
      the function names to be dumped. Effective only if `ir_dump_dir` is
      populated.
    enable_timing: A boolean, if set to true reports the execution time of each
      MLIR pass.
    print_ir_before: A string containing a regular expression. If specified,
      prints MLIR before passes which match.
    print_ir_after: A string containing a regular expression. If specified,
      prints MLIR after passes which match.
    print_ir_module_scope: A boolean, if set to true always print the top-level
      operation when printing IR for print_ir_[before|after].
    elide_elementsattrs_if_larger: An int, if specified elides ElementsAttrs
      with '...' that have more elements than the given upper limit.
    quantization_config: Configures the StableHLO Quantizer. See the comments in
      `QuantizationConfig` protobuf definition for details.
    use_buffer_offset: Force the model use buffer_offset & buffer_size fields
      instead of data. i.e. store the constant tensor and custom op binaries
      outside of Flatbuffers
    reduce_type_precision: Convert some tensor types to a lower precision if all
      values within that tensor are within the range of the lower precision.
      This could have side effects e.g. reduced flatbuffer size.
    qdq_conversion_mode: If set, assume input model is a quantized model
      represented with QDQ ops and convert to quantized kernels.
    disable_per_channel_quantization_for_dense_layers: If set, disables per
      channel end enables per tensor integer quantization for weights in Dense
      layers. The flag works only for integer quantized model.
    enable_composite_direct_lowering: If set, attempts to lower composite ops
      directly to tflite ops.
    model_origin_framework: A str specifying the framework of the original
      model. Can be {TENSORFLOW, KERAS, JAX, PYTORCH}
    canonicalizing_inf_as_min_max_float: When set to true, convert +Inf/-Inf to
      MIN/MAX float value and output of converter only contains finite values.

  Returns:
    conversion_flags: protocol buffer describing the conversion process.
  Raises:
    ValueError, if the input tensor type is unknown.
  """
  conversion_flags = _conversion_flags_pb2.ConverterFlags()
  conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type(
      inference_type, usage="inference_type flag"
  )
  if inference_input_type:
    conversion_flags.inference_input_type = (
        convert_inference_tf_type_to_tflite_type(
            inference_input_type, usage="inference_input_type flag"
        )
    )
  else:
    # Fall back to the (already converted) inference type for inputs.
    conversion_flags.inference_input_type = conversion_flags.inference_type
  conversion_flags.input_format = input_format
  conversion_flags.output_format = output_format
  if default_ranges_stats:
    conversion_flags.default_ranges_min = default_ranges_stats[0]
    conversion_flags.default_ranges_max = default_ranges_stats[1]
  conversion_flags.drop_control_dependency = drop_control_dependency
  conversion_flags.reorder_across_fake_quant = reorder_across_fake_quant
  conversion_flags.allow_custom_ops = allow_custom_ops
  conversion_flags.post_training_quantize = post_training_quantize
  conversion_flags.quantize_to_float16 = quantize_to_float16
  if dump_graphviz_dir:
    conversion_flags.dump_graphviz_dir = dump_graphviz_dir
  conversion_flags.dump_graphviz_include_video = dump_graphviz_video
  # Translate the OpsSet selection into individual boolean flags.
  if target_ops:
    if OpsSet.SELECT_TF_OPS in target_ops:
      conversion_flags.enable_select_tf_ops = True
    if set(target_ops) == {OpsSet.SELECT_TF_OPS}:
      conversion_flags.force_select_tf_ops = True
    if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops:
      conversion_flags.convert_to_stablehlo = True
    # The StableHLO ops set is exclusive and cannot be combined.
    if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops and len(target_ops) > 1:
      raise ValueError(
          "StableHLO Ops set can not be specified with other Ops set together"
      )
  if conversion_summary_dir:
    conversion_flags.conversion_summary_dir = conversion_summary_dir
  if select_user_tf_ops:
    conversion_flags.select_user_tf_ops.extend(select_user_tf_ops)
  conversion_flags.allow_all_select_tf_ops = allow_all_select_tf_ops
  conversion_flags.enable_tflite_resource_variables = (
      enable_tflite_resource_variables
  )
  conversion_flags.unfold_batchmatmul = unfold_batchmatmul
  conversion_flags.legalize_custom_tensor_list_ops = (
      legalize_custom_tensor_list_ops
  )
  conversion_flags.lower_tensor_list_ops = lower_tensor_list_ops
  conversion_flags.default_to_single_batch_in_tensor_list_ops = (
      default_to_single_batch_in_tensor_list_ops
  )
  if accumulation_type:
    conversion_flags.accumulation_type = convert_tensor_tf_type_to_tflite_type(
        accumulation_type, usage="accumulation_type flag"
    )
  conversion_flags.allow_bfloat16 = allow_bfloat16
  conversion_flags.unfold_large_splat_constant = unfold_large_splat_constant
  if supported_backends:
    conversion_flags.supported_backends.extend(supported_backends)
  conversion_flags.disable_per_channel_quantization = (
      disable_per_channel_quantization
  )
  conversion_flags.enable_mlir_dynamic_range_quantizer = (
      enable_mlir_dynamic_range_quantizer
  )
  conversion_flags.enable_dynamic_update_slice = enable_dynamic_update_slice
  conversion_flags.preserve_assert_op = preserve_assert_op
  conversion_flags.guarantee_all_funcs_one_use = guarantee_all_funcs_one_use
  if tf_quantization_mode:
    conversion_flags.tf_quantization_mode = tf_quantization_mode
  conversion_flags.disable_infer_tensor_range = disable_infer_tensor_range
  conversion_flags.use_fake_quant_num_bits = use_fake_quant_num_bits
  conversion_flags.enable_mlir_variable_quantization = (
      enable_mlir_variable_quantization
  )
  conversion_flags.disable_fuse_mul_and_fc = disable_fuse_mul_and_fc
  if quantization_options:  # Deprecated
    conversion_flags.quantization_options.CopyFrom(quantization_options)
  if quantization_config:
    conversion_flags.quantization_config.CopyFrom(quantization_config)

  # Transfer debug options. Check for existence before populating in order to
  # leverage defaults specified in proto definition.
  # TODO: b/319329480 - Match the debug_options fields with the user-facing
  # flags.
  if ir_dump_dir is not None:
    conversion_flags.debug_options.ir_dump_dir = ir_dump_dir
  if ir_dump_pass_regex is not None:
    conversion_flags.debug_options.ir_dump_pass_regex = ir_dump_pass_regex
  if ir_dump_func_regex is not None:
    conversion_flags.debug_options.ir_dump_func_regex = ir_dump_func_regex
  if enable_timing is not None:
    conversion_flags.debug_options.enable_timing = enable_timing
  if print_ir_before is not None:
    conversion_flags.debug_options.print_ir_before = print_ir_before
  if print_ir_after is not None:
    conversion_flags.debug_options.print_ir_after = print_ir_after
  if print_ir_module_scope is not None:
    conversion_flags.debug_options.print_ir_module_scope = print_ir_module_scope
  if elide_elementsattrs_if_larger is not None:
    conversion_flags.debug_options.elide_elementsattrs_if_larger = (
        elide_elementsattrs_if_larger
    )

  # NOTE(review): these three parameters default to False/None (not a None
  # sentinel), so the `is not None` guards are always true for the boolean
  # ones — presumably kept for symmetry with the debug options; confirm.
  if use_buffer_offset is not None:
    conversion_flags.use_buffer_offset = use_buffer_offset
  if reduce_type_precision is not None:
    conversion_flags.reduce_type_precision = reduce_type_precision
  if qdq_conversion_mode is not None:
    conversion_flags.qdq_conversion_mode = qdq_conversion_mode
  conversion_flags.disable_per_channel_quantization_for_dense_layers = (
      disable_per_channel_quantization_for_dense_layers
  )
  conversion_flags.enable_composite_direct_lowering = (
      enable_composite_direct_lowering
  )
  # Map the framework name string to its proto enum value.
  conversion_flags.model_origin_framework = (
      _conversion_flags_pb2.ConverterFlags.ModelOriginFramework.Value(
          model_origin_framework
      )
  )
  conversion_flags.canonicalizing_inf_as_min_max_float = (
      canonicalizing_inf_as_min_max_float
  )
  return conversion_flags
|
| 873 |
+
|
| 874 |
+
@convert_phase(
|
| 875 |
+
Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
|
| 876 |
+
)
|
| 877 |
+
def convert_graphdef_with_arrays(
|
| 878 |
+
input_data,
|
| 879 |
+
input_arrays_with_shape,
|
| 880 |
+
output_arrays,
|
| 881 |
+
control_output_arrays,
|
| 882 |
+
**kwargs,
|
| 883 |
+
):
|
| 884 |
+
"""Convert a frozen GraphDef that can't be loaded in TF.
|
| 885 |
+
|
| 886 |
+
Conversion can be customized by providing arguments that are forwarded to
|
| 887 |
+
`build_model_flags` and `build_conversion_flags` (see documentation).
|
| 888 |
+
|
| 889 |
+
Args:
|
| 890 |
+
input_data: Input data (i.e. often `sess.graph_def`),
|
| 891 |
+
input_arrays_with_shape: Tuple of strings representing input tensor names
|
| 892 |
+
and list of integers representing input shapes (e.g., [("foo" : [1, 16,
|
| 893 |
+
16, 3])]). Use only when graph cannot be loaded into TensorFlow and when
|
| 894 |
+
`input_tensors` is None.
|
| 895 |
+
output_arrays: List of output tensors to freeze graph with. Use only when
|
| 896 |
+
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
|
| 897 |
+
control_output_arrays: Control output node names. This is used when
|
| 898 |
+
converting a Graph with no output tensors. For example, if the graph's
|
| 899 |
+
last operation is a Print op, just specify that op's name in this field.
|
| 900 |
+
This can be used together with the `output_arrays` parameter.
|
| 901 |
+
**kwargs: See `build_model_flags` and `build_conversion_flags`.
|
| 902 |
+
|
| 903 |
+
Returns:
|
| 904 |
+
The converted data. For example if TFLite was the destination, then
|
| 905 |
+
this will be a tflite flatbuffer in a bytes array.
|
| 906 |
+
|
| 907 |
+
Raises:
|
| 908 |
+
Defined in `build_conversion_flags`.
|
| 909 |
+
"""
|
| 910 |
+
model_flags = build_model_flags(**kwargs)
|
| 911 |
+
conversion_flags = build_conversion_flags(**kwargs)
|
| 912 |
+
enable_mlir_converter = kwargs.get("enable_mlir_converter", True)
|
| 913 |
+
quantized_input_stats = kwargs.get("quantized_input_stats", None)
|
| 914 |
+
|
| 915 |
+
for idx, (name, shape) in enumerate(input_arrays_with_shape):
|
| 916 |
+
input_array = model_flags.input_arrays.add()
|
| 917 |
+
if _is_quantized_input_stats_required(conversion_flags):
|
| 918 |
+
if quantized_input_stats:
|
| 919 |
+
input_array.mean_value, input_array.std_value = quantized_input_stats[
|
| 920 |
+
idx
|
| 921 |
+
]
|
| 922 |
+
else:
|
| 923 |
+
raise ValueError(
|
| 924 |
+
"The `quantized_input_stats` flag must be defined when either "
|
| 925 |
+
"`inference_type` flag or `inference_input_type` flag is set to "
|
| 926 |
+
"tf.int8 or tf.uint8."
|
| 927 |
+
)
|
| 928 |
+
input_array.name = name
|
| 929 |
+
input_array.shape.dims.extend(list(map(int, shape)))
|
| 930 |
+
|
| 931 |
+
if output_arrays:
|
| 932 |
+
for name in output_arrays:
|
| 933 |
+
model_flags.output_arrays.append(name)
|
| 934 |
+
if control_output_arrays:
|
| 935 |
+
for name in control_output_arrays:
|
| 936 |
+
model_flags.control_output_arrays.append(name)
|
| 937 |
+
|
| 938 |
+
data = convert(
|
| 939 |
+
model_flags,
|
| 940 |
+
conversion_flags,
|
| 941 |
+
input_data.SerializeToString(),
|
| 942 |
+
debug_info_str=None,
|
| 943 |
+
enable_mlir_converter=enable_mlir_converter,
|
| 944 |
+
)
|
| 945 |
+
return data
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
@convert_phase(
|
| 949 |
+
Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
|
| 950 |
+
)
|
| 951 |
+
def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):
|
| 952 |
+
"""Convert a frozen GraphDef model using the TF Lite converter.
|
| 953 |
+
|
| 954 |
+
Conversion can be customized by providing arguments that are forwarded to
|
| 955 |
+
`build_model_flags` and `build_conversion_flags` (see documentation).
|
| 956 |
+
|
| 957 |
+
Args:
|
| 958 |
+
input_data: Input data (i.e. often `sess.graph_def`),
|
| 959 |
+
input_tensors: List of input tensors. Type and shape are computed using
|
| 960 |
+
`foo.shape` and `foo.dtype`.
|
| 961 |
+
output_tensors: List of output tensors (only .name is used from this).
|
| 962 |
+
**kwargs: See `build_model_flags` and `build_conversion_flags`.
|
| 963 |
+
|
| 964 |
+
Returns:
|
| 965 |
+
The converted data. For example if TFLite was the destination, then
|
| 966 |
+
this will be a tflite flatbuffer in a bytes array.
|
| 967 |
+
|
| 968 |
+
Raises:
|
| 969 |
+
Defined in `build_conversion_flags`.
|
| 970 |
+
"""
|
| 971 |
+
model_flags = build_model_flags(**kwargs)
|
| 972 |
+
conversion_flags = build_conversion_flags(**kwargs)
|
| 973 |
+
saved_model_dir = kwargs.get("saved_model_dir", None)
|
| 974 |
+
input_shapes = kwargs.get("input_shapes", None)
|
| 975 |
+
enable_mlir_converter = kwargs.get("enable_mlir_converter", True)
|
| 976 |
+
quantized_input_stats = kwargs.get("quantized_input_stats", None)
|
| 977 |
+
debug_info = kwargs.get("debug_info", None)
|
| 978 |
+
|
| 979 |
+
for idx, input_tensor in enumerate(input_tensors):
|
| 980 |
+
input_array = model_flags.input_arrays.add()
|
| 981 |
+
if saved_model_dir:
|
| 982 |
+
input_array.name = input_tensor.name
|
| 983 |
+
else:
|
| 984 |
+
input_array.name = util.get_tensor_name(input_tensor)
|
| 985 |
+
input_array.data_type = convert_tensor_tf_type_to_tflite_type(
|
| 986 |
+
input_tensor.dtype, usage="input type of the TensorFlow model"
|
| 987 |
+
)
|
| 988 |
+
|
| 989 |
+
if _is_quantized_input_stats_required(conversion_flags):
|
| 990 |
+
if quantized_input_stats:
|
| 991 |
+
input_array.mean_value, input_array.std_value = quantized_input_stats[
|
| 992 |
+
idx
|
| 993 |
+
]
|
| 994 |
+
else:
|
| 995 |
+
# We should ideally raise an error here, but we don't as it would break
|
| 996 |
+
# several models/projects that depend on this workflow.
|
| 997 |
+
warnings.warn(
|
| 998 |
+
"Statistics for quantized inputs were expected, but not "
|
| 999 |
+
"specified; continuing anyway."
|
| 1000 |
+
)
|
| 1001 |
+
|
| 1002 |
+
if input_shapes is None:
|
| 1003 |
+
shape = input_tensor.shape
|
| 1004 |
+
else:
|
| 1005 |
+
shape = input_shapes[idx]
|
| 1006 |
+
|
| 1007 |
+
if shape.rank is not None:
|
| 1008 |
+
# Create shapes with -1 for unknown dimensions.
|
| 1009 |
+
dims = []
|
| 1010 |
+
for dim in shape:
|
| 1011 |
+
if dim is None or (
|
| 1012 |
+
isinstance(dim, tensor_shape.Dimension) and dim.value is None
|
| 1013 |
+
):
|
| 1014 |
+
dims.append(-1)
|
| 1015 |
+
else:
|
| 1016 |
+
dims.append(int(dim))
|
| 1017 |
+
input_array.shape.dims.extend(dims)
|
| 1018 |
+
input_array.shape.unknown_rank = False
|
| 1019 |
+
else:
|
| 1020 |
+
input_array.shape.unknown_rank = True
|
| 1021 |
+
|
| 1022 |
+
for output_tensor in output_tensors:
|
| 1023 |
+
if saved_model_dir:
|
| 1024 |
+
model_flags.output_arrays.append(output_tensor.name)
|
| 1025 |
+
else:
|
| 1026 |
+
model_flags.output_arrays.append(util.get_tensor_name(output_tensor))
|
| 1027 |
+
|
| 1028 |
+
data = convert(
|
| 1029 |
+
model_flags,
|
| 1030 |
+
conversion_flags,
|
| 1031 |
+
input_data.SerializeToString(),
|
| 1032 |
+
debug_info_str=debug_info.SerializeToString() if debug_info else None,
|
| 1033 |
+
enable_mlir_converter=enable_mlir_converter,
|
| 1034 |
+
)
|
| 1035 |
+
return data
|
| 1036 |
+
|
| 1037 |
+
|
| 1038 |
+
@convert_phase(
|
| 1039 |
+
Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL
|
| 1040 |
+
)
|
| 1041 |
+
def convert_saved_model(**kwargs):
|
| 1042 |
+
"""Converts a SavedModel using TF Lite converter."""
|
| 1043 |
+
model_flags = build_model_flags(**kwargs)
|
| 1044 |
+
conversion_flags = build_conversion_flags(**kwargs)
|
| 1045 |
+
data = convert(
|
| 1046 |
+
model_flags,
|
| 1047 |
+
conversion_flags,
|
| 1048 |
+
input_data_str=None,
|
| 1049 |
+
debug_info_str=None,
|
| 1050 |
+
enable_mlir_converter=True,
|
| 1051 |
+
)
|
| 1052 |
+
return data
|
| 1053 |
+
|
| 1054 |
+
|
| 1055 |
+
@convert_phase(
|
| 1056 |
+
Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_JAX_HLO
|
| 1057 |
+
)
|
| 1058 |
+
def convert_jax_hlo(input_content, input_names, is_proto_format, **kwargs):
|
| 1059 |
+
"""Converts a Jax hlo-based model using TFLite converter."""
|
| 1060 |
+
model_flags = _model_flags_pb2.ModelFlags()
|
| 1061 |
+
model_flags.use_hlo_import = True
|
| 1062 |
+
if is_proto_format:
|
| 1063 |
+
model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_PROTO
|
| 1064 |
+
else:
|
| 1065 |
+
model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_TEXT
|
| 1066 |
+
|
| 1067 |
+
# Build input names.
|
| 1068 |
+
for input_name in input_names:
|
| 1069 |
+
input_array = model_flags.input_arrays.add()
|
| 1070 |
+
input_array.name = input_name
|
| 1071 |
+
|
| 1072 |
+
conversion_flags = build_conversion_flags(**kwargs)
|
| 1073 |
+
data = convert(
|
| 1074 |
+
model_flags,
|
| 1075 |
+
conversion_flags,
|
| 1076 |
+
input_data_str=input_content,
|
| 1077 |
+
debug_info_str=None,
|
| 1078 |
+
enable_mlir_converter=True,
|
| 1079 |
+
)
|
| 1080 |
+
return data
|
| 1081 |
+
|
| 1082 |
+
|
| 1083 |
+
@_tf_export(v1=["lite.toco_convert"])
|
| 1084 |
+
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
|
| 1085 |
+
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
|
| 1086 |
+
"""Convert a TensorFlow GraphDef to TFLite.
|
| 1087 |
+
|
| 1088 |
+
This function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.
|
| 1089 |
+
Conversion can be customized by providing arguments that are forwarded to
|
| 1090 |
+
`build_model_flags` and `build_conversion_flags` (see documentation for
|
| 1091 |
+
details).
|
| 1092 |
+
Args:
|
| 1093 |
+
input_data: Input data (i.e. often `sess.graph_def`).
|
| 1094 |
+
input_tensors: List of input tensors. Type and shape are computed using
|
| 1095 |
+
`foo.shape` and `foo.dtype`.
|
| 1096 |
+
output_tensors: List of output tensors (only .name is used from this).
|
| 1097 |
+
*args: See `build_model_flags` and `build_conversion_flags`.
|
| 1098 |
+
**kwargs: See `build_model_flags` and `build_conversion_flags`.
|
| 1099 |
+
|
| 1100 |
+
Returns:
|
| 1101 |
+
The converted TensorFlow Lite model in a bytes array.
|
| 1102 |
+
|
| 1103 |
+
Raises:
|
| 1104 |
+
Defined in `convert`.
|
| 1105 |
+
"""
|
| 1106 |
+
kwargs["enable_mlir_converter"] = kwargs.get("enable_mlir_converter", False)
|
| 1107 |
+
return convert_graphdef(
|
| 1108 |
+
input_data, input_tensors, output_tensors, *args, **kwargs
|
| 1109 |
+
)
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
def deduplicate_readonly_buffers(tflite_model):
|
| 1113 |
+
"""Generates a new model byte array after deduplicating readonly buffers.
|
| 1114 |
+
|
| 1115 |
+
This function should be invoked after the model optimization toolkit. The
|
| 1116 |
+
model optimization toolkit assumes that each tensor object owns its each
|
| 1117 |
+
buffer separately.
|
| 1118 |
+
|
| 1119 |
+
Args:
|
| 1120 |
+
tflite_model: TFLite flatbuffer in a byte array to be deduplicated.
|
| 1121 |
+
|
| 1122 |
+
Returns:
|
| 1123 |
+
TFLite flatbuffer in a bytes array, processed with the deduplication method.
|
| 1124 |
+
"""
|
| 1125 |
+
# Load TFLite Flatbuffer byte array into an object.
|
| 1126 |
+
model = flatbuffer_utils.convert_bytearray_to_object(tflite_model)
|
| 1127 |
+
|
| 1128 |
+
# Get all the read-only buffers, which can be modified without causing any
|
| 1129 |
+
# issue in the graph invocation stage.
|
| 1130 |
+
read_only_buffer_indices = set()
|
| 1131 |
+
for subgraph in model.subgraphs:
|
| 1132 |
+
# To get all the read-only buffers:
|
| 1133 |
+
# (1) Get all read-only input tensors.
|
| 1134 |
+
# (2) Discard intermediate or output tensors.
|
| 1135 |
+
# (3) Discard the subgraph's input/output tensors.
|
| 1136 |
+
# (4) Gather the buffers of the read-only input tensors.
|
| 1137 |
+
|
| 1138 |
+
# (1) Get read-only input tensors.
|
| 1139 |
+
read_only_input_tensor_indices = set()
|
| 1140 |
+
for op in subgraph.operators:
|
| 1141 |
+
if op.inputs is None:
|
| 1142 |
+
continue
|
| 1143 |
+
for i, input_tensor_idx in enumerate(op.inputs):
|
| 1144 |
+
# Ignore mutable tensors.
|
| 1145 |
+
if op.mutatingVariableInputs is not None:
|
| 1146 |
+
# Ignore invalid tensors.
|
| 1147 |
+
if (
|
| 1148 |
+
i < len(op.mutatingVariableInputs)
|
| 1149 |
+
and op.mutatingVariableInputs[i]
|
| 1150 |
+
):
|
| 1151 |
+
continue
|
| 1152 |
+
# Ignore variable tensors.
|
| 1153 |
+
if subgraph.tensors[input_tensor_idx].isVariable:
|
| 1154 |
+
continue
|
| 1155 |
+
read_only_input_tensor_indices.add(input_tensor_idx)
|
| 1156 |
+
|
| 1157 |
+
# (2) Discard intermediate or output tensors.
|
| 1158 |
+
for op in subgraph.operators:
|
| 1159 |
+
if op.outputs is not None:
|
| 1160 |
+
for output_tensor_idx in op.outputs:
|
| 1161 |
+
read_only_input_tensor_indices.discard(output_tensor_idx)
|
| 1162 |
+
if op.intermediates is not None:
|
| 1163 |
+
for intermediate_tensor_idx in op.intermediates:
|
| 1164 |
+
read_only_input_tensor_indices.discard(intermediate_tensor_idx)
|
| 1165 |
+
|
| 1166 |
+
# (3) Discard the subgraph's input and output tensors.
|
| 1167 |
+
if subgraph.inputs is not None:
|
| 1168 |
+
for input_tensor_idx in subgraph.inputs:
|
| 1169 |
+
read_only_input_tensor_indices.discard(input_tensor_idx)
|
| 1170 |
+
if subgraph.outputs is not None:
|
| 1171 |
+
for output_tensor_idx in subgraph.outputs:
|
| 1172 |
+
read_only_input_tensor_indices.discard(output_tensor_idx)
|
| 1173 |
+
|
| 1174 |
+
# (4) Gather the buffers of the read-only input tensors.
|
| 1175 |
+
for tensor_idx in read_only_input_tensor_indices:
|
| 1176 |
+
read_only_buffer_indices.add(subgraph.tensors[tensor_idx].buffer)
|
| 1177 |
+
|
| 1178 |
+
# Ignore invalid negative index or zero-sized buffers.
|
| 1179 |
+
for buffer_idx in read_only_buffer_indices.copy():
|
| 1180 |
+
if buffer_idx < 0 or (
|
| 1181 |
+
model.buffers[buffer_idx].data is None
|
| 1182 |
+
or isinstance(model.buffers[buffer_idx].data, list)
|
| 1183 |
+
or model.buffers[buffer_idx].data.size == 0
|
| 1184 |
+
):
|
| 1185 |
+
read_only_buffer_indices.discard(buffer_idx)
|
| 1186 |
+
|
| 1187 |
+
class BufferIndex:
|
| 1188 |
+
"""A class to store index, size, hash of the buffers in TFLite model."""
|
| 1189 |
+
|
| 1190 |
+
def __init__(self, idx, size, hash_value):
|
| 1191 |
+
self.idx = idx
|
| 1192 |
+
self.size = size
|
| 1193 |
+
self.hash_value = hash_value
|
| 1194 |
+
|
| 1195 |
+
read_only_buffers = list(
|
| 1196 |
+
map(
|
| 1197 |
+
lambda index: BufferIndex( # pylint: disable=g-long-lambda
|
| 1198 |
+
index,
|
| 1199 |
+
model.buffers[index].data.size,
|
| 1200 |
+
hashlib.md5(model.buffers[index].data.data.tobytes()).hexdigest(),
|
| 1201 |
+
),
|
| 1202 |
+
read_only_buffer_indices,
|
| 1203 |
+
)
|
| 1204 |
+
)
|
| 1205 |
+
|
| 1206 |
+
# Sort read_only_buffers by buffer size & hash in descending order.
|
| 1207 |
+
read_only_buffers = sorted(
|
| 1208 |
+
read_only_buffers,
|
| 1209 |
+
key=lambda buffer: (buffer.size, buffer.hash_value),
|
| 1210 |
+
reverse=True,
|
| 1211 |
+
)
|
| 1212 |
+
|
| 1213 |
+
# Create a map of duplicate buffers (same size and same type).
|
| 1214 |
+
# eg: In [1, 2, 3, 4, 5, 6] if (1, 4, 6) and (2, 5) are each, groups of buffer
|
| 1215 |
+
# indices of the same size and type, then the map would be {4:1, 6:1, 5:2}
|
| 1216 |
+
duplicate_buffer_map = {}
|
| 1217 |
+
for i, buffer_i in enumerate(read_only_buffers):
|
| 1218 |
+
# This buffer is a duplicate.
|
| 1219 |
+
if buffer_i.idx in duplicate_buffer_map:
|
| 1220 |
+
continue
|
| 1221 |
+
# This buffer is unique. Scan rest of the list to find duplicates
|
| 1222 |
+
# of this buffer and mark them accordingly.
|
| 1223 |
+
for buffer_j in read_only_buffers[i + 1 :]:
|
| 1224 |
+
if buffer_j.idx in duplicate_buffer_map:
|
| 1225 |
+
continue
|
| 1226 |
+
if buffer_i.size != buffer_j.size:
|
| 1227 |
+
break
|
| 1228 |
+
if buffer_i.hash_value != buffer_j.hash_value:
|
| 1229 |
+
continue
|
| 1230 |
+
# Found duplicate. Nullify j-th buffer and use i-th buffer instead.
|
| 1231 |
+
duplicate_buffer_map[buffer_j.idx] = buffer_i.idx
|
| 1232 |
+
|
| 1233 |
+
# Make the duplicated tensors use the single shared buffer index.
|
| 1234 |
+
for subgraph in model.subgraphs:
|
| 1235 |
+
for op in subgraph.operators:
|
| 1236 |
+
if op.inputs is None:
|
| 1237 |
+
continue
|
| 1238 |
+
for input_tensor in op.inputs:
|
| 1239 |
+
buffer_idx = subgraph.tensors[input_tensor].buffer
|
| 1240 |
+
if buffer_idx in duplicate_buffer_map:
|
| 1241 |
+
subgraph.tensors[input_tensor].buffer = duplicate_buffer_map[
|
| 1242 |
+
buffer_idx
|
| 1243 |
+
]
|
| 1244 |
+
|
| 1245 |
+
# Nullify the unused buffers.
|
| 1246 |
+
for idx in duplicate_buffer_map:
|
| 1247 |
+
model.buffers[idx].data = None
|
| 1248 |
+
|
| 1249 |
+
# Return a TFLite flatbuffer as a byte array.
|
| 1250 |
+
return flatbuffer_utils.convert_object_to_bytearray(model)
|
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_saved_model.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# ==============================================================================
|
| 15 |
+
"""Functions to convert SavedModel to frozen GraphDefs."""
|
| 16 |
+
|
| 17 |
+
from tensorflow.lite.python import util
|
| 18 |
+
from tensorflow.lite.python.convert_phase import Component
|
| 19 |
+
from tensorflow.lite.python.convert_phase import convert_phase
|
| 20 |
+
from tensorflow.lite.python.convert_phase import SubComponent
|
| 21 |
+
from tensorflow.python.client import session
|
| 22 |
+
from tensorflow.python.framework import ops
|
| 23 |
+
from tensorflow.python.platform import tf_logging as logging
|
| 24 |
+
from tensorflow.python.saved_model import constants
|
| 25 |
+
from tensorflow.python.saved_model import loader
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_meta_graph_def(saved_model_dir, tag_set):
|
| 29 |
+
"""Validate saved_model and extract MetaGraphDef.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
saved_model_dir: saved_model path to convert.
|
| 33 |
+
tag_set: Set of tag(s) of the MetaGraphDef to load.
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
The meta_graph_def used for tflite conversion.
|
| 37 |
+
|
| 38 |
+
Raises:
|
| 39 |
+
ValueError: No valid MetaGraphDef for given tag_set.
|
| 40 |
+
"""
|
| 41 |
+
with session.Session(graph=ops.Graph()) as sess:
|
| 42 |
+
return loader.load(sess, tag_set, saved_model_dir)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get_signature_def(meta_graph, signature_key):
|
| 46 |
+
"""Get the signature def from meta_graph with given signature_key.
|
| 47 |
+
|
| 48 |
+
Args:
|
| 49 |
+
meta_graph: meta_graph_def.
|
| 50 |
+
signature_key: signature_def in the meta_graph_def.
|
| 51 |
+
|
| 52 |
+
Returns:
|
| 53 |
+
The signature_def used for tflite conversion.
|
| 54 |
+
|
| 55 |
+
Raises:
|
| 56 |
+
ValueError: Given signature_key is not valid for this meta_graph.
|
| 57 |
+
"""
|
| 58 |
+
signature_def_map = meta_graph.signature_def
|
| 59 |
+
signature_def_keys = set(signature_def_map.keys())
|
| 60 |
+
logging.info(
|
| 61 |
+
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
|
| 62 |
+
"following keys: %s", signature_def_keys)
|
| 63 |
+
if signature_key not in signature_def_keys:
|
| 64 |
+
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
|
| 65 |
+
"values are '{}'.".format(signature_key,
|
| 66 |
+
",".join(signature_def_keys)))
|
| 67 |
+
return signature_def_map[signature_key]
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_inputs_outputs(signature_def):
|
| 71 |
+
"""Get inputs and outputs from SignatureDef.
|
| 72 |
+
|
| 73 |
+
Args:
|
| 74 |
+
signature_def: SignatureDef in the meta_graph_def for conversion.
|
| 75 |
+
|
| 76 |
+
Returns:
|
| 77 |
+
The inputs and outputs in the graph for conversion.
|
| 78 |
+
"""
|
| 79 |
+
inputs_tensor_info = signature_def.inputs
|
| 80 |
+
outputs_tensor_info = signature_def.outputs
|
| 81 |
+
|
| 82 |
+
def gather_names(tensor_info):
|
| 83 |
+
return [tensor_info[key].name for key in tensor_info]
|
| 84 |
+
|
| 85 |
+
inputs = gather_names(inputs_tensor_info)
|
| 86 |
+
outputs = gather_names(outputs_tensor_info)
|
| 87 |
+
return inputs, outputs
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _get_tensors(graph, signature_def_tensor_names=None,
|
| 91 |
+
user_tensor_names=None):
|
| 92 |
+
"""Gets the tensors associated with the tensor names.
|
| 93 |
+
|
| 94 |
+
Either signature_def_tensor_names or user_tensor_names should be provided. If
|
| 95 |
+
the user provides tensors, the tensors associated with the user provided
|
| 96 |
+
tensor names are provided. Otherwise, the tensors associated with the names in
|
| 97 |
+
the SignatureDef are provided.
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
graph: GraphDef representing graph.
|
| 101 |
+
signature_def_tensor_names: Tensor names stored in either the inputs or
|
| 102 |
+
outputs of a SignatureDef. (default None)
|
| 103 |
+
user_tensor_names: Tensor names provided by the user. (default None)
|
| 104 |
+
|
| 105 |
+
Returns:
|
| 106 |
+
List of tensors.
|
| 107 |
+
|
| 108 |
+
Raises:
|
| 109 |
+
ValueError:
|
| 110 |
+
signature_def_tensors and user_tensor_names are undefined or empty.
|
| 111 |
+
user_tensor_names are not valid.
|
| 112 |
+
"""
|
| 113 |
+
tensors = []
|
| 114 |
+
if user_tensor_names:
|
| 115 |
+
# Sort the tensor names.
|
| 116 |
+
user_tensor_names = sorted(user_tensor_names)
|
| 117 |
+
|
| 118 |
+
tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)
|
| 119 |
+
elif signature_def_tensor_names:
|
| 120 |
+
tensors = [
|
| 121 |
+
graph.get_tensor_by_name(name)
|
| 122 |
+
for name in sorted(signature_def_tensor_names)
|
| 123 |
+
]
|
| 124 |
+
else:
|
| 125 |
+
# Throw ValueError if signature_def_tensors and user_tensor_names are both
|
| 126 |
+
# either undefined or empty.
|
| 127 |
+
raise ValueError(
|
| 128 |
+
"Specify either signature_def_tensor_names or user_tensor_names")
|
| 129 |
+
|
| 130 |
+
return tensors
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_SAVED_MODEL)
|
| 134 |
+
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
|
| 135 |
+
output_arrays, tag_set, signature_key):
|
| 136 |
+
"""Converts a SavedModel to a frozen graph.
|
| 137 |
+
|
| 138 |
+
Args:
|
| 139 |
+
saved_model_dir: SavedModel directory to convert.
|
| 140 |
+
input_arrays: List of input tensors to freeze graph with. Uses input arrays
|
| 141 |
+
from SignatureDef when none are provided.
|
| 142 |
+
input_shapes: Dict of strings representing input tensor names to list of
|
| 143 |
+
integers representing input shapes (e.g., {"foo": : [1, 16, 16, 3]}).
|
| 144 |
+
Automatically determined when input shapes is None (e.g., {"foo" : None}).
|
| 145 |
+
output_arrays: List of output tensors to freeze graph with. Uses output
|
| 146 |
+
arrays from SignatureDef when none are provided.
|
| 147 |
+
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
|
| 148 |
+
analyze. All tags in the tag set must be present.
|
| 149 |
+
signature_key: Key identifying SignatureDef containing inputs and outputs.
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
frozen_graph_def: Frozen GraphDef.
|
| 153 |
+
in_tensors: List of input tensors for the graph.
|
| 154 |
+
out_tensors: List of output tensors for the graph.
|
| 155 |
+
graph: `Graph` object.
|
| 156 |
+
|
| 157 |
+
Raises:
|
| 158 |
+
ValueError:
|
| 159 |
+
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
|
| 160 |
+
signature_key is not in the MetaGraphDef.
|
| 161 |
+
assets/ directory is in the MetaGraphDef.
|
| 162 |
+
input_shapes does not match the length of input_arrays.
|
| 163 |
+
input_arrays or output_arrays are not valid.
|
| 164 |
+
"""
|
| 165 |
+
# Read SignatureDef.
|
| 166 |
+
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
|
| 167 |
+
signature_def = get_signature_def(meta_graph, signature_key)
|
| 168 |
+
inputs, outputs = get_inputs_outputs(signature_def)
|
| 169 |
+
|
| 170 |
+
# Check SavedModel for assets directory.
|
| 171 |
+
collection_def = meta_graph.collection_def
|
| 172 |
+
if constants.ASSETS_KEY in collection_def:
|
| 173 |
+
raise ValueError("SavedModels with assets/ directory are not supported.")
|
| 174 |
+
|
| 175 |
+
graph = ops.Graph()
|
| 176 |
+
with session.Session(graph=graph) as sess:
|
| 177 |
+
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
|
| 178 |
+
|
| 179 |
+
# Gets input and output tensors.
|
| 180 |
+
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
|
| 181 |
+
in_tensors = _get_tensors(graph, inputs, input_arrays)
|
| 182 |
+
out_tensors = _get_tensors(graph, outputs, output_arrays)
|
| 183 |
+
util.set_tensor_shapes(in_tensors, input_shapes)
|
| 184 |
+
|
| 185 |
+
frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)
|
| 186 |
+
return frozen_graph_def, in_tensors, out_tensors, sess.graph
|