diff --git a/.gitattributes b/.gitattributes index 0f3eb782591b5e94b7ea3c15b3425513877e36cb..d55b51676be13544be61528b9970162ed9125c5e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -198,3 +198,8 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/gr SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so filter=lfs diff=lfs merge=lfs -text diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2cc719415679ef86837e377276f204f662543fd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..341ad8e0bb88b5630c9103103ee989fb9819e3a7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/__init__.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f4bac237a9ce375b69a5d5e5ad32da4c30a5ad0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/gen_audio_microfrontend_op.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/gen_audio_microfrontend_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5aab0b95b3056b329da7bb5c0876406c2b02f67 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/__pycache__/gen_audio_microfrontend_op.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ab4c1b41fa0ea371416e6416743d5ea458e1f294 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/ops/gen_audio_microfrontend_op.py @@ -0,0 +1,423 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_AudioMicrofrontend_out_type = TypeVar("TV_AudioMicrofrontend_out_type", _atypes.Float32, _atypes.UInt16) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('audio_microfrontend') +def audio_microfrontend(audio: Annotated[Any, _atypes.Int16], sample_rate:int=16000, window_size:int=25, window_step:int=10, num_channels:int=32, upper_band_limit:float=7500, lower_band_limit:float=125, smoothing_bits:int=10, even_smoothing:float=0.025, odd_smoothing:float=0.06, min_signal_remaining:float=0.05, enable_pcan:bool=False, pcan_strength:float=0.95, pcan_offset:float=80, gain_bits:int=21, enable_log:bool=True, scale_shift:int=6, left_context:int=0, right_context:int=0, frame_stride:int=1, zero_padding:bool=False, out_scale:int=1, out_type:TV_AudioMicrofrontend_out_type=_dtypes.uint16, name=None) -> Annotated[Any, TV_AudioMicrofrontend_out_type]: + r"""Audio Microfrontend Op. + + This Op converts a sequence of audio data into one or more + feature vectors containing filterbanks of the input. The + conversion process uses a lightweight library to perform: + + 1. A slicing window function + 2. 
Short-time FFTs + 3. Filterbank calculations + 4. Noise reduction + 5. PCAN Auto Gain Control + 6. Logarithmic scaling + + Arguments + audio: 1D Tensor, int16 audio data in temporal ordering. + sample_rate: Integer, the sample rate of the audio in Hz. + window_size: Integer, length of desired time frames in ms. + window_step: Integer, length of step size for the next frame in ms. + num_channels: Integer, the number of filterbank channels to use. + upper_band_limit: Float, the highest frequency included in the filterbanks. + lower_band_limit: Float, the lowest frequency included in the filterbanks. + smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. + even_smoothing: Float, smoothing coefficient for even-numbered channels. + odd_smoothing: Float, smoothing coefficient for odd-numbered channels. + min_signal_remaining: Float, fraction of signal to preserve in smoothing. + enable_pcan: Bool, enable PCAN auto gain control. + pcan_strength: Float, gain normalization exponent. + pcan_offset: Float, positive value added in the normalization denominator. + gain_bits: Int, number of fractional bits in the gain. + enable_log: Bool, enable logarithmic scaling of filterbanks. + scale_shift: Integer, scale filterbanks by 2^(scale_shift). + left_context: Integer, number of preceding frames to attach to each frame. + right_context: Integer, number of preceding frames to attach to each frame. + frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M]. + zero_padding: Bool, if left/right context is out-of-bounds, attach frame of + zeroes. Otherwise, frame[0] or frame[size-1] will be copied. + out_scale: Integer, divide all filterbanks by this number. + out_type: DType, type of the output Tensor, defaults to UINT16. + + Returns + filterbanks: 2D Tensor, each row is a time frame, each column is a channel. + + Args: + audio: A `Tensor` of type `int16`. + sample_rate: An optional `int`. Defaults to `16000`. + window_size: An optional `int`. 
Defaults to `25`. + window_step: An optional `int`. Defaults to `10`. + num_channels: An optional `int`. Defaults to `32`. + upper_band_limit: An optional `float`. Defaults to `7500`. + lower_band_limit: An optional `float`. Defaults to `125`. + smoothing_bits: An optional `int`. Defaults to `10`. + even_smoothing: An optional `float`. Defaults to `0.025`. + odd_smoothing: An optional `float`. Defaults to `0.06`. + min_signal_remaining: An optional `float`. Defaults to `0.05`. + enable_pcan: An optional `bool`. Defaults to `False`. + pcan_strength: An optional `float`. Defaults to `0.95`. + pcan_offset: An optional `float`. Defaults to `80`. + gain_bits: An optional `int`. Defaults to `21`. + enable_log: An optional `bool`. Defaults to `True`. + scale_shift: An optional `int`. Defaults to `6`. + left_context: An optional `int`. Defaults to `0`. + right_context: An optional `int`. Defaults to `0`. + frame_stride: An optional `int`. Defaults to `1`. + zero_padding: An optional `bool`. Defaults to `False`. + out_scale: An optional `int`. Defaults to `1`. + out_type: An optional `tf.DType` from: `tf.uint16, tf.float32`. Defaults to `tf.uint16`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `out_type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "AudioMicrofrontend", name, audio, "sample_rate", sample_rate, + "window_size", window_size, "window_step", window_step, + "num_channels", num_channels, "upper_band_limit", upper_band_limit, + "lower_band_limit", lower_band_limit, "smoothing_bits", + smoothing_bits, "even_smoothing", even_smoothing, "odd_smoothing", + odd_smoothing, "min_signal_remaining", min_signal_remaining, + "enable_pcan", enable_pcan, "pcan_strength", pcan_strength, + "pcan_offset", pcan_offset, "gain_bits", gain_bits, "enable_log", + enable_log, "scale_shift", scale_shift, "left_context", left_context, + "right_context", right_context, "frame_stride", frame_stride, + "zero_padding", zero_padding, "out_scale", out_scale, "out_type", + out_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_audio_microfrontend( + (audio, sample_rate, window_size, window_step, num_channels, + upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing, + odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength, + pcan_offset, gain_bits, enable_log, scale_shift, left_context, + right_context, frame_stride, zero_padding, out_scale, out_type, + name,), None) + if _result is not NotImplemented: + return _result + return audio_microfrontend_eager_fallback( + audio, sample_rate=sample_rate, window_size=window_size, + window_step=window_step, num_channels=num_channels, + upper_band_limit=upper_band_limit, + lower_band_limit=lower_band_limit, smoothing_bits=smoothing_bits, + even_smoothing=even_smoothing, odd_smoothing=odd_smoothing, + min_signal_remaining=min_signal_remaining, enable_pcan=enable_pcan, + pcan_strength=pcan_strength, pcan_offset=pcan_offset, + gain_bits=gain_bits, enable_log=enable_log, 
scale_shift=scale_shift, + left_context=left_context, right_context=right_context, + frame_stride=frame_stride, zero_padding=zero_padding, + out_scale=out_scale, out_type=out_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + audio_microfrontend, (), dict(audio=audio, + sample_rate=sample_rate, + window_size=window_size, + window_step=window_step, + num_channels=num_channels, + upper_band_limit=upper_band_limit, + lower_band_limit=lower_band_limit, + smoothing_bits=smoothing_bits, + even_smoothing=even_smoothing, + odd_smoothing=odd_smoothing, + min_signal_remaining=min_signal_remaining, + enable_pcan=enable_pcan, + pcan_strength=pcan_strength, + pcan_offset=pcan_offset, + gain_bits=gain_bits, + enable_log=enable_log, + scale_shift=scale_shift, + left_context=left_context, + right_context=right_context, + frame_stride=frame_stride, + zero_padding=zero_padding, + out_scale=out_scale, + out_type=out_type, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_audio_microfrontend( + (audio, sample_rate, window_size, window_step, num_channels, + upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing, + odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength, + pcan_offset, gain_bits, enable_log, scale_shift, left_context, + right_context, frame_stride, zero_padding, out_scale, out_type, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if sample_rate is None: + sample_rate = 16000 + sample_rate = _execute.make_int(sample_rate, "sample_rate") + if window_size is None: + window_size = 25 + window_size = _execute.make_int(window_size, "window_size") + if window_step is None: + window_step = 10 + window_step = _execute.make_int(window_step, "window_step") + if num_channels is None: + num_channels = 32 + num_channels = _execute.make_int(num_channels, "num_channels") + if upper_band_limit is None: + upper_band_limit = 7500 + upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit") + if lower_band_limit is None: + lower_band_limit = 125 + lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit") + if smoothing_bits is None: + smoothing_bits = 10 + smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits") + if even_smoothing is None: + even_smoothing = 0.025 + even_smoothing = _execute.make_float(even_smoothing, "even_smoothing") + if odd_smoothing is None: + odd_smoothing = 0.06 + odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing") + if min_signal_remaining is None: + min_signal_remaining = 0.05 + min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining") + if enable_pcan is None: + enable_pcan = False + enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan") + if pcan_strength is None: + pcan_strength = 0.95 + pcan_strength = _execute.make_float(pcan_strength, "pcan_strength") + if pcan_offset is None: + pcan_offset = 80 + pcan_offset = _execute.make_float(pcan_offset, "pcan_offset") + if gain_bits is None: + gain_bits = 21 + gain_bits = _execute.make_int(gain_bits, "gain_bits") + if enable_log is None: + enable_log = True + enable_log = _execute.make_bool(enable_log, "enable_log") + if scale_shift is None: + scale_shift = 6 + scale_shift = _execute.make_int(scale_shift, "scale_shift") + if left_context is None: + left_context = 0 + left_context = _execute.make_int(left_context, "left_context") + if 
right_context is None: + right_context = 0 + right_context = _execute.make_int(right_context, "right_context") + if frame_stride is None: + frame_stride = 1 + frame_stride = _execute.make_int(frame_stride, "frame_stride") + if zero_padding is None: + zero_padding = False + zero_padding = _execute.make_bool(zero_padding, "zero_padding") + if out_scale is None: + out_scale = 1 + out_scale = _execute.make_int(out_scale, "out_scale") + if out_type is None: + out_type = _dtypes.uint16 + out_type = _execute.make_type(out_type, "out_type") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "AudioMicrofrontend", audio=audio, sample_rate=sample_rate, + window_size=window_size, + window_step=window_step, + num_channels=num_channels, + upper_band_limit=upper_band_limit, + lower_band_limit=lower_band_limit, + smoothing_bits=smoothing_bits, + even_smoothing=even_smoothing, + odd_smoothing=odd_smoothing, + min_signal_remaining=min_signal_remaining, + enable_pcan=enable_pcan, + pcan_strength=pcan_strength, + pcan_offset=pcan_offset, gain_bits=gain_bits, + enable_log=enable_log, scale_shift=scale_shift, + left_context=left_context, + right_context=right_context, + frame_stride=frame_stride, + zero_padding=zero_padding, out_scale=out_scale, + out_type=out_type, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + audio_microfrontend, (), dict(audio=audio, sample_rate=sample_rate, + window_size=window_size, + window_step=window_step, + num_channels=num_channels, + upper_band_limit=upper_band_limit, + lower_band_limit=lower_band_limit, + smoothing_bits=smoothing_bits, + even_smoothing=even_smoothing, + odd_smoothing=odd_smoothing, + min_signal_remaining=min_signal_remaining, + enable_pcan=enable_pcan, + pcan_strength=pcan_strength, + pcan_offset=pcan_offset, + gain_bits=gain_bits, + enable_log=enable_log, + scale_shift=scale_shift, + left_context=left_context, + right_context=right_context, + frame_stride=frame_stride, + 
zero_padding=zero_padding, + out_scale=out_scale, + out_type=out_type, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("sample_rate", _op._get_attr_int("sample_rate"), "window_size", + _op._get_attr_int("window_size"), "window_step", + _op._get_attr_int("window_step"), "num_channels", + _op._get_attr_int("num_channels"), "upper_band_limit", + _op.get_attr("upper_band_limit"), "lower_band_limit", + _op.get_attr("lower_band_limit"), "smoothing_bits", + _op._get_attr_int("smoothing_bits"), "even_smoothing", + _op.get_attr("even_smoothing"), "odd_smoothing", + _op.get_attr("odd_smoothing"), "min_signal_remaining", + _op.get_attr("min_signal_remaining"), "enable_pcan", + _op._get_attr_bool("enable_pcan"), "pcan_strength", + _op.get_attr("pcan_strength"), "pcan_offset", + _op.get_attr("pcan_offset"), "gain_bits", + _op._get_attr_int("gain_bits"), "enable_log", + _op._get_attr_bool("enable_log"), "scale_shift", + _op._get_attr_int("scale_shift"), "left_context", + _op._get_attr_int("left_context"), "right_context", + _op._get_attr_int("right_context"), "frame_stride", + _op._get_attr_int("frame_stride"), "zero_padding", + _op._get_attr_bool("zero_padding"), "out_scale", + _op._get_attr_int("out_scale"), "out_type", + _op._get_attr_type("out_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "AudioMicrofrontend", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +AudioMicrofrontend = tf_export("raw_ops.AudioMicrofrontend")(_ops.to_raw_op(audio_microfrontend)) +_dispatcher_for_audio_microfrontend = audio_microfrontend._tf_type_based_dispatcher.Dispatch + + +def audio_microfrontend_eager_fallback(audio: Annotated[Any, _atypes.Int16], sample_rate: int, window_size: int, window_step: int, num_channels: int, upper_band_limit: float, lower_band_limit: float, smoothing_bits: int, even_smoothing: float, odd_smoothing: 
float, min_signal_remaining: float, enable_pcan: bool, pcan_strength: float, pcan_offset: float, gain_bits: int, enable_log: bool, scale_shift: int, left_context: int, right_context: int, frame_stride: int, zero_padding: bool, out_scale: int, out_type: TV_AudioMicrofrontend_out_type, name, ctx) -> Annotated[Any, TV_AudioMicrofrontend_out_type]: + if sample_rate is None: + sample_rate = 16000 + sample_rate = _execute.make_int(sample_rate, "sample_rate") + if window_size is None: + window_size = 25 + window_size = _execute.make_int(window_size, "window_size") + if window_step is None: + window_step = 10 + window_step = _execute.make_int(window_step, "window_step") + if num_channels is None: + num_channels = 32 + num_channels = _execute.make_int(num_channels, "num_channels") + if upper_band_limit is None: + upper_band_limit = 7500 + upper_band_limit = _execute.make_float(upper_band_limit, "upper_band_limit") + if lower_band_limit is None: + lower_band_limit = 125 + lower_band_limit = _execute.make_float(lower_band_limit, "lower_band_limit") + if smoothing_bits is None: + smoothing_bits = 10 + smoothing_bits = _execute.make_int(smoothing_bits, "smoothing_bits") + if even_smoothing is None: + even_smoothing = 0.025 + even_smoothing = _execute.make_float(even_smoothing, "even_smoothing") + if odd_smoothing is None: + odd_smoothing = 0.06 + odd_smoothing = _execute.make_float(odd_smoothing, "odd_smoothing") + if min_signal_remaining is None: + min_signal_remaining = 0.05 + min_signal_remaining = _execute.make_float(min_signal_remaining, "min_signal_remaining") + if enable_pcan is None: + enable_pcan = False + enable_pcan = _execute.make_bool(enable_pcan, "enable_pcan") + if pcan_strength is None: + pcan_strength = 0.95 + pcan_strength = _execute.make_float(pcan_strength, "pcan_strength") + if pcan_offset is None: + pcan_offset = 80 + pcan_offset = _execute.make_float(pcan_offset, "pcan_offset") + if gain_bits is None: + gain_bits = 21 + gain_bits = 
_execute.make_int(gain_bits, "gain_bits") + if enable_log is None: + enable_log = True + enable_log = _execute.make_bool(enable_log, "enable_log") + if scale_shift is None: + scale_shift = 6 + scale_shift = _execute.make_int(scale_shift, "scale_shift") + if left_context is None: + left_context = 0 + left_context = _execute.make_int(left_context, "left_context") + if right_context is None: + right_context = 0 + right_context = _execute.make_int(right_context, "right_context") + if frame_stride is None: + frame_stride = 1 + frame_stride = _execute.make_int(frame_stride, "frame_stride") + if zero_padding is None: + zero_padding = False + zero_padding = _execute.make_bool(zero_padding, "zero_padding") + if out_scale is None: + out_scale = 1 + out_scale = _execute.make_int(out_scale, "out_scale") + if out_type is None: + out_type = _dtypes.uint16 + out_type = _execute.make_type(out_type, "out_type") + audio = _ops.convert_to_tensor(audio, _dtypes.int16) + _inputs_flat = [audio] + _attrs = ("sample_rate", sample_rate, "window_size", window_size, + "window_step", window_step, "num_channels", num_channels, + "upper_band_limit", upper_band_limit, "lower_band_limit", lower_band_limit, + "smoothing_bits", smoothing_bits, "even_smoothing", even_smoothing, + "odd_smoothing", odd_smoothing, "min_signal_remaining", + min_signal_remaining, "enable_pcan", enable_pcan, "pcan_strength", + pcan_strength, "pcan_offset", pcan_offset, "gain_bits", gain_bits, + "enable_log", enable_log, "scale_shift", scale_shift, "left_context", + left_context, "right_context", right_context, "frame_stride", frame_stride, + "zero_padding", zero_padding, "out_scale", out_scale, "out_type", out_type) + _result = _execute.execute(b"AudioMicrofrontend", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "AudioMicrofrontend", _inputs_flat, _attrs, _result) + _result, = _result + return _result + diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a409cec1c616bb78a9efcfb2c557f4a7cc701c40 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..625f75b25f4bbdc14cb203fe05d21bb839ccc83c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/audio_microfrontend_op.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/audio_microfrontend_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89a78c5b28ce856760348c0eabd4d0b26eab813c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/__pycache__/audio_microfrontend_op.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so new file mode 100644 index 0000000000000000000000000000000000000000..c9c56d5921d4ff96d6b365ffc1999538f4f34887 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/_audio_microfrontend_op.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c87c0f07860502c2f646feeeee3524feb706e1b430d5ddf0314133a260531af +size 1240400 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9cfe21e667f8b1a5cf45fc5b6caaad013b6eea --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py @@ -0,0 +1,110 @@ +# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""AudioMicrofrontend Op creates filterbanks from audio data.""" + +from tensorflow.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import load_library +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.platform import resource_loader + +_audio_microfrontend_op = load_library.load_op_library( + resource_loader.get_path_to_datafile("_audio_microfrontend_op.so")) + + +def audio_microfrontend(audio, + sample_rate=16000, + window_size=25, + window_step=10, + num_channels=32, + upper_band_limit=7500.0, + lower_band_limit=125.0, + smoothing_bits=10, + even_smoothing=0.025, + odd_smoothing=0.06, + min_signal_remaining=0.05, + enable_pcan=True, + pcan_strength=0.95, + pcan_offset=80.0, + gain_bits=21, + enable_log=True, + scale_shift=6, + left_context=0, + right_context=0, + frame_stride=1, + zero_padding=False, + out_scale=1, + out_type=dtypes.uint16): + """Audio Microfrontend Op. + + This Op converts a sequence of audio data into one or more + feature vectors containing filterbanks of the input. The + conversion process uses a lightweight library to perform: + + 1. A slicing window function + 2. Short-time FFTs + 3. Filterbank calculations + 4. 
Noise reduction + 5. PCAN Auto Gain Control + 6. Logarithmic scaling + + Args: + audio: 1D Tensor, int16 audio data in temporal ordering. + sample_rate: Integer, the sample rate of the audio in Hz. + window_size: Integer, length of desired time frames in ms. + window_step: Integer, length of step size for the next frame in ms. + num_channels: Integer, the number of filterbank channels to use. + upper_band_limit: Float, the highest frequency included in the filterbanks. + lower_band_limit: Float, the lowest frequency included in the filterbanks. + smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. + even_smoothing: Float, smoothing coefficient for even-numbered channels. + odd_smoothing: Float, smoothing coefficient for odd-numbered channels. + min_signal_remaining: Float, fraction of signal to preserve in smoothing. + enable_pcan: Bool, enable PCAN auto gain control. + pcan_strength: Float, gain normalization exponent. + pcan_offset: Float, positive value added in the normalization denominator. + gain_bits: Int, number of fractional bits in the gain. + enable_log: Bool, enable logarithmic scaling of filterbanks. + scale_shift: Integer, scale filterbanks by 2^(scale_shift). + left_context: Integer, number of preceding frames to attach to each frame. + right_context: Integer, number of preceding frames to attach to each frame. + frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M]. + zero_padding: Bool, if left/right context is out-of-bounds, attach frame of + zeroes. Otherwise, frame[0] or frame[size-1] will be copied. + out_scale: Integer, divide all filterbanks by this number. + out_type: DType, type of the output Tensor, defaults to UINT16. + + Returns: + filterbanks: 2D Tensor, each row is a time frame, each column is a channel. + + Raises: + ValueError: If the audio tensor is not explicitly a vector. 
+ """ + audio_shape = audio.shape + if audio_shape.ndims is None: + raise ValueError("Input to `AudioMicrofrontend` should have known rank.") + if len(audio_shape) > 1: + audio = array_ops.reshape(audio, [-1]) + + return gen_audio_microfrontend_op.audio_microfrontend( + audio, sample_rate, window_size, window_step, num_channels, + upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing, + odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength, + pcan_offset, gain_bits, enable_log, scale_shift, left_context, + right_context, frame_stride, zero_padding, out_scale, out_type) + + +ops.NotDifferentiable("AudioMicrofrontend") diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43bb3d99ee43e22278c17bc109696e927bb48392 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/__init__.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4232dffa1ae2e914bb39310d781b183d785581a9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/model_runtime_info_pb2.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/model_runtime_info_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fe3cacc894415fbdee7057dd9b8266cb1b03fb7 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/model_runtime_info_pb2.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/profiling_info_pb2.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/profiling_info_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9fc9761cbb6e12bd6b0895f3e05421423866f21 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/__pycache__/profiling_info_pb2.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/model_runtime_info_pb2.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/model_runtime_info_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..e45f0566bf4efecf7a1991741ed49fc5d9a12915 --- /dev/null +++ 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/model_runtime_info_pb2.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorflow/lite/profiling/proto/model_runtime_info.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.lite.profiling.proto import profiling_info_pb2 as tensorflow_dot_lite_dot_profiling_dot_proto_dot_profiling__info__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n8tensorflow/lite/profiling/proto/model_runtime_info.proto\x12\x10tflite.profiling\x1a\x34tensorflow/lite/profiling/proto/profiling_info.proto\"_\n\x13ModelRuntimeDetails\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x34\n\tsubgraphs\x18\x02 \x03(\x0b\x32!.tflite.profiling.RuntimeSubgraph\"\xa9\x02\n\x0fRuntimeSubgraph\x12\x13\n\x0bsubgraph_id\x18\x01 \x01(\x05\x12%\n\x05\x65\x64ges\x18\x02 \x03(\x0b\x32\x16.tflite.profiling.Edge\x12%\n\x05nodes\x18\x03 \x03(\x0b\x32\x16.tflite.profiling.Node\x12\x1a\n\x0e\x65xecution_plan\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x45\n\rsubgraph_type\x18\x05 \x01(\x0e\x32..tflite.profiling.RuntimeSubgraph.SubgraphType\"P\n\x0cSubgraphType\x12\x14\n\x10UNKNOWN_SUBGRAPH\x10\x00\x12\x13\n\x0fTFLITE_SUBGRAPH\x10\x01\x12\x15\n\x11\x44\x45LEGATE_SUBGRAPH\x10\x02\"\xba\x02\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x12\n\x06inputs\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x07outputs\x18\x05 \x03(\x05\x42\x02\x10\x01\x12\x19\n\rintermediates\x18\x06 \x03(\x05\x42\x02\x10\x01\x12\x17\n\x0btemporaries\x18\x07 
\x03(\x05\x42\x02\x10\x01\x12\x38\n\x0fop_profile_data\x18\n \x01(\x0b\x32\x1f.tflite.profiling.OpProfileData\x12\x46\n\x15\x64\x65legate_node_details\x18\x08 \x01(\x0b\x32%.tflite.profiling.DelegateNodeDetailsH\x00\x12\x1e\n\x14\x64\x65legated_to_node_id\x18\t \x01(\x05H\x00\x42\x0b\n\tnode_info\"R\n\x13\x44\x65legateNodeDetails\x12\x15\n\rdelegate_name\x18\x01 \x01(\t\x12$\n\x18tflite_node_ids_replaced\x18\x02 \x03(\x05\x42\x02\x10\x01\"\x81\x05\n\x04\x45\x64ge\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x32\n\tdata_type\x18\x03 \x01(\x0e\x32\x1f.tflite.profiling.Edge.DataType\x12\x11\n\x05shape\x18\x04 \x03(\x05\x42\x02\x10\x01\x12\x17\n\x0f\x61llocation_type\x18\x05 \x01(\t\x12\x36\n\x0blayout_type\x18\x06 \x01(\x0e\x32!.tflite.profiling.Edge.LayoutType\x12\x0c\n\x04size\x18\x07 \x01(\x05\"\x85\x02\n\x08\x44\x61taType\x12\x10\n\x0cUNKNOWN_TYPE\x10\x00\x12\x0b\n\x07\x46LOAT32\x10\x01\x12\t\n\x05INT32\x10\x02\x12\t\n\x05UINT8\x10\x03\x12\t\n\x05INT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x08\n\x04\x42OOL\x10\x06\x12\t\n\x05INT16\x10\x07\x12\r\n\tCOMPLEX64\x10\x08\x12\x08\n\x04INT8\x10\t\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT64\x10\x0b\x12\x0e\n\nCOMPLEX128\x10\x0c\x12\n\n\x06UINT64\x10\r\x12\x0c\n\x08RESOURCE\x10\x0e\x12\x0b\n\x07VARIANT\x10\x0f\x12\n\n\x06UINT32\x10\x10\x12\n\n\x06UINT16\x10\x11\x12\x08\n\x04INT4\x10\x12\x12\x0c\n\x08\x42\x46LOAT16\x10\x13\"\xb0\x01\n\nLayoutType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SCALAR\x10\x01\x12\n\n\x06LINEAR\x10\x02\x12\x06\n\x02HW\x10\x03\x12\x07\n\x03\x43HW\x10\x04\x12\x07\n\x03HWC\x10\x05\x12\x08\n\x04OIHW\x10\x06\x12\x08\n\x04OHWI\x10\x07\x12\x08\n\x04IHWO\x10\x08\x12\x08\n\x04IOHW\x10\t\x12\x08\n\x04\x42HWC\x10\n\x12\x08\n\x04HWDC\x10\x0b\x12\t\n\x05\x42HWDC\x10\x0c\x12\x07\n\x03HWD\x10\r\x12\t\n\x05OHWDI\x10\x0e\x12\x08\n\x04HWIO\x10\x0f\x42\x02P\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 
'tensorflow.lite.profiling.proto.model_runtime_info_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'P\001' + _RUNTIMESUBGRAPH.fields_by_name['execution_plan']._options = None + _RUNTIMESUBGRAPH.fields_by_name['execution_plan']._serialized_options = b'\020\001' + _NODE.fields_by_name['inputs']._options = None + _NODE.fields_by_name['inputs']._serialized_options = b'\020\001' + _NODE.fields_by_name['outputs']._options = None + _NODE.fields_by_name['outputs']._serialized_options = b'\020\001' + _NODE.fields_by_name['intermediates']._options = None + _NODE.fields_by_name['intermediates']._serialized_options = b'\020\001' + _NODE.fields_by_name['temporaries']._options = None + _NODE.fields_by_name['temporaries']._serialized_options = b'\020\001' + _DELEGATENODEDETAILS.fields_by_name['tflite_node_ids_replaced']._options = None + _DELEGATENODEDETAILS.fields_by_name['tflite_node_ids_replaced']._serialized_options = b'\020\001' + _EDGE.fields_by_name['shape']._options = None + _EDGE.fields_by_name['shape']._serialized_options = b'\020\001' + _MODELRUNTIMEDETAILS._serialized_start=132 + _MODELRUNTIMEDETAILS._serialized_end=227 + _RUNTIMESUBGRAPH._serialized_start=230 + _RUNTIMESUBGRAPH._serialized_end=527 + _RUNTIMESUBGRAPH_SUBGRAPHTYPE._serialized_start=447 + _RUNTIMESUBGRAPH_SUBGRAPHTYPE._serialized_end=527 + _NODE._serialized_start=530 + _NODE._serialized_end=844 + _DELEGATENODEDETAILS._serialized_start=846 + _DELEGATENODEDETAILS._serialized_end=928 + _EDGE._serialized_start=931 + _EDGE._serialized_end=1572 + _EDGE_DATATYPE._serialized_start=1132 + _EDGE_DATATYPE._serialized_end=1393 + _EDGE_LAYOUTTYPE._serialized_start=1396 + _EDGE_LAYOUTTYPE._serialized_end=1572 +# @@protoc_insertion_point(module_scope) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/profiling_info_pb2.py 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/profiling_info_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..a6f9f15f054930685e62478cb2485ffd1f1b74cd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/profiling/proto/profiling_info_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorflow/lite/profiling/proto/profiling_info.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n4tensorflow/lite/profiling/proto/profiling_info.proto\x12\x10tflite.profiling\"\xa7\x01\n\x16\x42\x65nchmarkProfilingData\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12:\n\x0cinit_profile\x18\x02 \x01(\x0b\x32$.tflite.profiling.ModelProfilingData\x12=\n\x0fruntime_profile\x18\x03 \x01(\x0b\x32$.tflite.profiling.ModelProfilingData\"\x9c\x01\n\x12ModelProfilingData\x12\x42\n\x11subgraph_profiles\x18\x01 \x03(\x0b\x32\'.tflite.profiling.SubGraphProfilingData\x12\x42\n\x11\x64\x65legate_profiles\x18\x02 \x03(\x0b\x32\'.tflite.profiling.DelegateProfilingData\"\x80\x01\n\x15SubGraphProfilingData\x12\x15\n\rsubgraph_name\x18\x01 \x01(\t\x12\x16\n\x0esubgraph_index\x18\x02 \x01(\x05\x12\x38\n\x0fper_op_profiles\x18\x03 \x03(\x0b\x32\x1f.tflite.profiling.OpProfileData\"h\n\x15\x44\x65legateProfilingData\x12\x15\n\rdelegate_name\x18\x01 \x01(\t\x12\x38\n\x0fper_op_profiles\x18\x02 \x03(\x0b\x32\x1f.tflite.profiling.OpProfileData\"\x93\x01\n\x0fOpProfilingStat\x12\r\n\x05\x66irst\x18\x01 \x01(\x03\x12\x0c\n\x04last\x18\x02 
\x01(\x03\x12\x0b\n\x03\x61vg\x18\x03 \x01(\x03\x12\x0e\n\x06stddev\x18\x04 \x01(\x02\x12\x10\n\x08variance\x18\x05 \x01(\x02\x12\x0b\n\x03min\x18\x06 \x01(\x03\x12\x0b\n\x03max\x18\x07 \x01(\x03\x12\x0b\n\x03sum\x18\x08 \x01(\x03\x12\r\n\x05\x63ount\x18\t \x01(\x03\"\xcf\x01\n\rOpProfileData\x12\x11\n\tnode_type\x18\x01 \x01(\t\x12\x41\n\x16inference_microseconds\x18\x02 \x01(\x0b\x32!.tflite.profiling.OpProfilingStat\x12\x31\n\x06mem_kb\x18\x03 \x01(\x0b\x32!.tflite.profiling.OpProfilingStat\x12\x14\n\x0ctimes_called\x18\x04 \x01(\x03\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x11\n\trun_order\x18\x06 \x01(\x03\x42\x02P\x01') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.lite.profiling.proto.profiling_info_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'P\001' + _BENCHMARKPROFILINGDATA._serialized_start=75 + _BENCHMARKPROFILINGDATA._serialized_end=242 + _MODELPROFILINGDATA._serialized_start=245 + _MODELPROFILINGDATA._serialized_end=401 + _SUBGRAPHPROFILINGDATA._serialized_start=404 + _SUBGRAPHPROFILINGDATA._serialized_end=532 + _DELEGATEPROFILINGDATA._serialized_start=534 + _DELEGATEPROFILINGDATA._serialized_end=638 + _OPPROFILINGSTAT._serialized_start=641 + _OPPROFILINGSTAT._serialized_end=788 + _OPPROFILEDATA._serialized_start=791 + _OPPROFILEDATA._serialized_end=998 +# @@protoc_insertion_point(module_scope) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/__init__.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..405b9e059e9751036014ab78f446fbb840e11f61 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/analyzer.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d18ff0fce2da53883df38c6e1ef1495315391f1b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/analyzer.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/conversion_metadata_schema_py_generated.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/conversion_metadata_schema_py_generated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca7f9d94bd5357114187f3f536610947efc7912 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/conversion_metadata_schema_py_generated.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4773c1e9c427899447432f2674d919057bb8d7b Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_phase.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_phase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337e5f4c2a83c03fe986bcafcf91b90a778d6391 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_phase.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_saved_model.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_saved_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50308df05877148ec41ce73f1b47d5affe5436e8 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/convert_saved_model.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/interpreter.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/interpreter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6869852e8cd05a60f355bd20fd0cbc4733d6f54 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/interpreter.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..75b78219980277a72cf07f1d2678a2f83006f0c2 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite_constants.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8b9bae31d1b123d8a49434bd5185888b0f5bd78 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/lite_constants.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/op_hint.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/op_hint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93c7bf6b5623c1fb27219f21117976e9af0efc0f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/op_hint.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_py_generated.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_py_generated.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91a58f686d80424338ba6b182618177904c894c4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_py_generated.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208a0c1c36e6b5947dd5db35b4e55020afa6deb0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/schema_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_convert.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1947d35c33ec46651f73d1181e1d5e345e060b0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_convert.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_keras_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_keras_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c869f1df9c7a10a2419f5a7a1d187e6ff4ae401 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/tflite_keras_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e26e85c5b1862964ebe0ced0745c6da8db66123 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/__pycache__/util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c25b6d83c414b56692d3959ee76db124fe8d546 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0181580f660c3cbf48ec64de0449d42161f0bf92 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def ModelAnalyzer(arg0: str, arg1: bool, arg2: bool) -> str: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so new file mode 100644 index 0000000000000000000000000000000000000000..fed471c38886f9f8c933e9de49ec00fed44abf00 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer_wrapper/_pywrap_analyzer_wrapper.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad6e053502d3f80aa92e5b1fb1c61236a2cb3c34c1f500e3ccce89d112362d1b +size 2573672 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dbfeead7e51c355300ff595a8fac44bb5e38f58 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/authoring.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/authoring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e4c8791a6ac7144c2dd9ae647175053948f939 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/__pycache__/authoring.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/authoring.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/authoring.py new file mode 100644 index 0000000000000000000000000000000000000000..9dcd3ee3b0a1cba8f4a7d9f604af7c2c54a9a30e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/authoring/authoring.py @@ -0,0 +1,301 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TensorFlow Authoring tool package for TFLite compatibility. + +WARNING: The package is experimental and subject to change. 
+ +This package provides a way to check TFLite compatibility at model authoring +time. + +Example: + @tf.lite.experimental.authoring.compatible + @tf.function(input_signature=[ + tf.TensorSpec(shape=[None], dtype=tf.float32) + ]) + def f(x): + return tf.cosh(x) + + result = f(tf.constant([0.0])) + + > COMPATIBILITY WARNING: op 'tf.Cosh' require(s) "Select TF Ops" for model + > conversion for TensorFlow Lite. + > Op: tf.Cosh + > - tensorflow/python/framework/op_def_library.py:xxx + > - tensorflow/python/ops/gen_math_ops.py:xxx + > - simple_authoring.py:xxx +""" +import functools +from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2 +# pylint: disable=g-import-not-at-top +from tensorflow.lite.python import convert +from tensorflow.lite.python import lite +from tensorflow.python.util.tf_export import tf_export as _tf_export + + +_CUSTOM_OPS_HDR = "Custom ops: " +_TF_OPS_HDR = "TF Select ops: " +_AUTHORING_ERROR_HDR = "COMPATIBILITY ERROR" +_AUTHORING_WARNING_HDR = "COMPATIBILITY WARNING" +_FUNC_GRAPH_SRC_PATH = "tensorflow/python/framework/func_graph.py" + + +class CompatibilityError(Exception): + """Raised when an error occurs with TFLite compatibility.""" + pass + + +class _Compatible: + """A decorator class to check TFLite compatibility created by `lite.experimental.authoring.compatible`.""" + + def __init__(self, + target, + converter_target_spec=None, + converter_allow_custom_ops=None, + raise_exception=False): + """Initialize the decorator object. + + Here is the description of the object variables. + - _func : decorated function. + - _obj_func : for class object, we need to use this object to provide `self` + instance as 1 first argument. + - _verified : whether the compatibility is checked or not. + + Args: + target: decorated function. + converter_target_spec : target_spec of TFLite converter parameter. + converter_allow_custom_ops : allow_custom_ops of TFLite converter + parameter. 
+ raise_exception : to raise an exception on compatibility issues. + User need to use get_compatibility_log() to check details. + """ + functools.update_wrapper(self, target) + self._func = target + self._obj_func = None + self._verified = False + self._log_messages = [] + self._raise_exception = raise_exception + self._converter_target_spec = converter_target_spec + self._converter_allow_custom_ops = converter_allow_custom_ops + + def __get__(self, instance, cls): + """A Python descriptor interface.""" + self._obj_func = self._func.__get__(instance, cls) + return self + + def _get_func(self): + """Returns decorated function object. + + For a class method, use self._obj_func to provide `self` instance. + """ + if self._obj_func is not None: + return self._obj_func + else: + return self._func + + def __call__(self, *args, **kwargs): # pylint: disable=g-doc-args + """Calls decorated function object. + + Also verifies if the function is compatible with TFLite. + + Returns: + A execution result of the decorated function. 
+ """ + + if not self._verified: + model = self._get_func() + concrete_func = model.get_concrete_function(*args, **kwargs) + converter = lite.TFLiteConverterV2.from_concrete_functions( + [concrete_func], model) + # Set provided converter parameters + if self._converter_target_spec is not None: + converter.target_spec = self._converter_target_spec + if self._converter_allow_custom_ops is not None: + converter.allow_custom_ops = self._converter_allow_custom_ops + try: + converter.convert() + except convert.ConverterError as err: + self._decode_error(err) + finally: + self._verified = True + + return self._get_func()(*args, **kwargs) + + def get_concrete_function(self, *args, **kwargs): + """Returns a concrete function of the decorated function.""" + return self._get_func().get_concrete_function(*args, **kwargs) + + def _get_location_string(self, location): + """Dump location of ConveterError.errors.location.""" + callstack = [] + for single_call in reversed(location.call): + if (location.type == + converter_error_data_pb2.ConverterErrorData.CALLSITELOC): + callstack.append( + f" - {single_call.source.filename}:{single_call.source.line}") + else: + callstack.append(str(single_call)) + callstack_dump = "\n".join(callstack) + return callstack_dump + + def _dump_error_details(self, ops, locations): + """Dump the list of ops and locations.""" + for i in range(0, len(ops)): + callstack_dump = self._get_location_string(locations[i]) + err_string = f"Op: {ops[i]}\n{callstack_dump}\n" + self._log(err_string) + + def _decode_error_legacy(self, err): + """Parses the given legacy ConverterError for OSS.""" + for line in str(err).splitlines(): + # Check custom op usage error. + if line.startswith(_CUSTOM_OPS_HDR): + custom_ops = line[len(_CUSTOM_OPS_HDR):] + err_string = ( + f"{_AUTHORING_ERROR_HDR}: op '{custom_ops}' is(are) not natively " + "supported by TensorFlow Lite. You need to provide a custom " + "operator. 
https://www.tensorflow.org/lite/guide/ops_custom") + self._log(err_string) + # Check TensorFlow op usage error. + elif line.startswith(_TF_OPS_HDR): + tf_ops = line[len(_TF_OPS_HDR):] + err_string = ( + f"{_AUTHORING_WARNING_HDR}: op '{tf_ops}' require(s) \"Select TF " + "Ops\" for model conversion for TensorFlow Lite. " + "https://www.tensorflow.org/lite/guide/ops_select") + self._log(err_string) + + def _decode_converter_error(self, err): + """Parses the given ConverterError which has detailed error information.""" + custom_ops = [] + custom_ops_location = [] + tf_ops = [] + tf_ops_location = [] + gpu_not_compatible_ops = [] + for err in err.errors: + # Check custom op usage error. + if err.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_NEEDS_CUSTOM_OPS: + custom_ops.append(err.operator.name) + custom_ops_location.append(err.location) + # Check TensorFlow op usage error. + elif err.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_NEEDS_FLEX_OPS: + tf_ops.append(err.operator.name) + tf_ops_location.append(err.location) + # Check GPU delegate compatibility error. + elif err.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_GPU_NOT_COMPATIBLE: + gpu_not_compatible_ops.append(err.operator.name) + # Log the first line of ConveterError.errors.error_message only + # since the seond line is "Error code: xxxx" + self._log(err.error_message.splitlines()[0]) + self._log(self._get_location_string(err.location) + "\n") + else: + # Log other errors. + self._log(f"{_AUTHORING_ERROR_HDR}: {err.error_message}") + self._log(self._get_location_string(err.location) + "\n") + + if custom_ops: + custom_ops_str = ", ".join(sorted(custom_ops)) + err_string = ( + f"{_AUTHORING_ERROR_HDR}: op '{custom_ops_str}' is(are) not natively " + "supported by TensorFlow Lite. You need to provide a custom " + "operator. 
https://www.tensorflow.org/lite/guide/ops_custom") + self._log(err_string) + self._dump_error_details(custom_ops, custom_ops_location) + + if tf_ops: + tf_ops_str = ", ".join(sorted(tf_ops)) + err_string = ( + f"{_AUTHORING_WARNING_HDR}: op '{tf_ops_str}' require(s) \"Select TF" + " Ops\" for model conversion for TensorFlow Lite. " + "https://www.tensorflow.org/lite/guide/ops_select") + self._log(err_string) + self._dump_error_details(tf_ops, tf_ops_location) + + if gpu_not_compatible_ops: + not_compatible_ops_str = ", ".join(sorted(gpu_not_compatible_ops)) + err_string = ( + f"{_AUTHORING_WARNING_HDR}: op '{not_compatible_ops_str}' aren't " + "compatible with TensorFlow Lite GPU delegate. " + "https://www.tensorflow.org/lite/performance/gpu") + self._log(err_string) + + def _decode_error(self, err): + """Parses the given ConverterError and generates compatibility warnings.""" + if hasattr(err, "errors"): + self._decode_converter_error(err) + else: + self._decode_error_legacy(err) + + if self._raise_exception and self._log_messages: + raise CompatibilityError(f"CompatibilityException at {repr(self._func)}") + + def _log(self, message): + """Log and print authoring warning / error message.""" + self._log_messages.append(message) + print(message) + + def get_compatibility_log(self): + """Returns list of compatibility log messages. + + WARNING: This method should only be used for unit tests. + + Returns: + The list of log messages by the recent compatibility check. + Raises: + RuntimeError: when the compatibility was NOT checked. + """ + if not self._verified: + raise RuntimeError("target compatibility isn't verified yet") + return self._log_messages + + +@_tf_export("lite.experimental.authoring.compatible") +def compatible(target=None, converter_target_spec=None, **kwargs): + """Wraps `tf.function` into a callable function with TFLite compatibility checking. 
+ + Example: + + ```python + @tf.lite.experimental.authoring.compatible + @tf.function(input_signature=[ + tf.TensorSpec(shape=[None], dtype=tf.float32) + ]) + def f(x): + return tf.cosh(x) + + result = f(tf.constant([0.0])) + # COMPATIBILITY WARNING: op 'tf.Cosh' require(s) "Select TF Ops" for model + # conversion for TensorFlow Lite. + # Op: tf.Cosh + # - tensorflow/python/framework/op_def_library.py:748 + # - tensorflow/python/ops/gen_math_ops.py:2458 + # - :6 + ``` + + WARNING: Experimental interface, subject to change. + + Args: + target: A `tf.function` to decorate. + converter_target_spec : target_spec of TFLite converter parameter. + **kwargs: The keyword arguments of the decorator class _Compatible. + + Returns: + A callable object of `tf.lite.experimental.authoring._Compatible`. + """ + if target is None: + def wrapper(target): + return _Compatible(target, converter_target_spec, **kwargs) + return wrapper + else: + return _Compatible(target, converter_target_spec, **kwargs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..f4206cb68c932d402c8a54fd5381bbff5e9ba1dd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert.py @@ -0,0 +1,1250 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Converts a frozen graph into a TFLite FlatBuffer.""" + +import distutils.spawn +import enum +import hashlib +import os as _os +import platform as _platform +import subprocess as _subprocess +import tempfile as _tempfile +from typing import Optional +import warnings + +from tensorflow.compiler.mlir.lite import converter_flags_pb2 as _conversion_flags_pb2 +from tensorflow.compiler.mlir.lite import model_flags_pb2 as _model_flags_pb2 +from tensorflow.compiler.mlir.lite import types_pb2 as _types_pb2 +from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2 +from tensorflow.compiler.mlir.lite.python import wrap_converter +from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 +from tensorflow.compiler.mlir.quantization.stablehlo import quantization_options_pb2 as quant_opts_pb2 +from tensorflow.lite.python import lite_constants +from tensorflow.lite.python import util +from tensorflow.lite.python.convert_phase import Component +from tensorflow.lite.python.convert_phase import convert_phase +from tensorflow.lite.python.convert_phase import ConverterError +from tensorflow.lite.python.convert_phase import SubComponent +from tensorflow.lite.python.metrics.wrapper import metrics_wrapper as _metrics_wrapper +from tensorflow.lite.tools import flatbuffer_utils +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.platform import resource_loader as _resource_loader +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export as _tf_export + + +def _is_quantized_input_stats_required( + conversion_flags: _conversion_flags_pb2.ConverterFlags, +) -> bool: + """Checks if the `quantized_input_stats` flag is required for conversion. 
+ + Args: + conversion_flags: A protocol buffer describing the conversion process. + + Returns: + True, if the `inference_type` or the `inference_input_type` is a quantized + type and it is not post training quantization, else False. + """ + quantized_inference_types = [ + _types_pb2.QUANTIZED_UINT8, + _types_pb2.QUANTIZED_INT8, + ] + return ( + conversion_flags.inference_type in quantized_inference_types + or conversion_flags.inference_input_type in quantized_inference_types + ) and not conversion_flags.post_training_quantize + + +def convert_tensor_tf_type_to_tflite_type( + tf_type: dtypes.DType, usage: str = "" +) -> _types_pb2.IODataType: + """Convert tensor type from tf type to tflite type. + + Args: + tf_type: TensorFlow type. + usage: Text describing the reason for invoking this function. + + Raises: + ValueError: If `tf_type` is unsupported. + + Returns: + tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto. + """ + mapping = { + dtypes.float16: _types_pb2.FLOAT16, + dtypes.float32: _types_pb2.FLOAT, + dtypes.float64: _types_pb2.FLOAT64, + dtypes.int8: _types_pb2.INT8, + dtypes.int16: _types_pb2.INT16, + dtypes.uint16: _types_pb2.UINT16, + dtypes.int32: _types_pb2.INT32, + dtypes.int64: _types_pb2.INT64, + dtypes.uint8: _types_pb2.UINT8, + dtypes.uint32: _types_pb2.UINT32, + dtypes.uint64: _types_pb2.UINT64, + dtypes.string: _types_pb2.STRING, + dtypes.bool: _types_pb2.BOOL, + dtypes.complex64: _types_pb2.COMPLEX64, + dtypes.complex128: _types_pb2.COMPLEX128, + } + tflite_type = mapping.get(tf_type) + if tflite_type is None: + raise ValueError( + "Unsupported TensorFlow type `{0}` provided for the {1}".format( + tf_type, usage + ) + ) + return tflite_type + + +# Only a few restricted tensor types are allowed for explicitly setting +# inference/input/output types. +def convert_inference_tf_type_to_tflite_type( + tf_type: dtypes.DType, usage: str = "" +) -> _types_pb2.IODataType: + """Convert inference type from tf type to tflite type. 
+ + Args: + tf_type: TensorFlow type. + usage: Text describing the reason for invoking this function. + + Raises: + ValueError: If `tf_type` is unsupported. + + Returns: + tflite_type: TFLite type. Refer to compiler/mlir/lite/types.proto. + """ + mapping = { + dtypes.float32: _types_pb2.FLOAT, + dtypes.uint8: _types_pb2.QUANTIZED_UINT8, + dtypes.int8: _types_pb2.QUANTIZED_INT8, + dtypes.int16: _types_pb2.QUANTIZED_INT16, + } + tflite_type = mapping.get(tf_type) + if tflite_type is None: + raise ValueError( + "Unsupported TensorFlow type `{0}` provided for the {1}".format( + tf_type, usage + ) + ) + return tflite_type + + +# Find the deprecated conversion binary using the resource loader if using from +# bazel, otherwise we are in a pip where console_scripts already has the tool. +if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY: + _deprecated_conversion_binary = "" +else: + _deprecated_conversion_binary = _resource_loader.get_path_to_datafile( + "../toco/python/toco_from_protos" + ) + if not _os.path.exists(_deprecated_conversion_binary): + _deprecated_conversion_binary = "toco_from_protos" + + +def _try_convert_to_unicode(output): + if output is None: + return "" + + if isinstance(output, bytes): + try: + return output.decode("utf-8") + except UnicodeDecodeError: + pass + return output + + +@_tf_export("lite.OpsSet") +class OpsSet(enum.Enum): + """Enum class defining the sets of ops available to generate TFLite models. + + WARNING: Experimental interface, subject to change. + """ + + # Convert model using TensorFlow Lite builtin ops. + TFLITE_BUILTINS = "TFLITE_BUILTINS" + + # Convert model using TensorFlow ops. Not all TensorFlow ops are available. + # WARNING: Experimental interface, subject to change. + SELECT_TF_OPS = "SELECT_TF_OPS" + + # Convert model using only TensorFlow Lite quantized int8 operations. + # Specifying this will throw an error for operations that do not yet have + # quantized implementations. 
+ TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8" + + # Convert model using only TensorFlow Lite operations with quantized int8 + # weights, int16 activations and int64 bias. + # Specifying this will throw an error for operations that do not yet have + # quantized implementations. + # This quantization mode may be used in models for super-resolution, + # audio signal processing or image de-noising. It improves accuracy + # significantly, but only slightly increases the model size. + # WARNING: These ops are currently experimental and have not yet been + # finalized. + # They are only compatible with CPU execution, and have not been optimized for + # production. + EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = ( + "EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8" + ) + + # Convert model using only stablehlo ops. + # This option can not be combined with other OpsSets. + # The feature is in early development. + # The code to execute StableHLO ops in the runtime is to be implemented + # and the serialization format is not stabilized yet. + EXPERIMENTAL_STABLEHLO_OPS = "EXPERIMENTAL_STABLEHLO_OPS" + + def __str__(self): + return str(self.value) + + @staticmethod + def get_options(): + """Returns a list of OpsSet options as a list of strings.""" + return [str(option) for option in list(OpsSet)] + + +@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE) +def mlir_quantize( + input_data_str, + disable_per_channel=False, + fully_quantize=False, + inference_type=_types_pb2.QUANTIZED_INT8, + input_data_type=dtypes.float32, + output_data_type=dtypes.float32, + enable_numeric_verify=False, + enable_whole_model_verify=False, + denylisted_ops=None, + denylisted_nodes=None, + enable_variable_quantization=False, + disable_per_channel_for_dense_layers=False, + debug_options_str="", +): + """Quantize `input_data_str` with calibration results. + + Args: + input_data_str: Input data in serialized form (e.g. 
a TFLITE model with + calibration results). + disable_per_channel: Bool indicating whether to do per-channel or per-tensor + quantization + fully_quantize: Bool indicating whether to fully quantize the model. Besides + model body, the input/output will be quantized as well. + inference_type: Data type for the activations. The default value is int8. + input_data_type: Data type for the inputs. The default value is float32. + output_data_type: Data type for the outputs. The default value is float32. + enable_numeric_verify: Experimental. Subject to change. Bool indicating + whether to add NumericVerify ops into the debug mode quantized model. + enable_whole_model_verify: Experimental. Subject to change. Bool indicating + whether to add verification for layer by layer, or on whole model. When + disabled (per-layer) float and quantized ops will be run from same input + (output of previous quantized layer). When enabled, float and quantized + ops will run with respective float and quantized output of previous ops. + denylisted_ops: Experimental. Subject to change. Set of ops to denylist. + denylisted_nodes: Experimental. Subject to change. Set of notes to denylist. + enable_variable_quantization: Experimental. Subject to change. Bool + indicating whether to enable quantization of the residual variables + remaining after the variable freezing pass. + disable_per_channel_for_dense_layers: Bool indicating whether to do + per-channel or per-tensor quantization in Fully Connected layers. Default + value is False meaning per-channel quantization is enabled. + debug_options_str: Serialized proto describing TFLite converter debug + options, see `debug/debug_options.proto`. + + Returns: + Quantized model in serialized form (e.g. a TFLITE model) with floating-point + inputs and outputs. 
+ """ + return wrap_converter.wrapped_experimental_mlir_quantize( + input_data_str, + disable_per_channel, + fully_quantize, + inference_type, + convert_tensor_tf_type_to_tflite_type(input_data_type), + convert_tensor_tf_type_to_tflite_type(output_data_type), + enable_numeric_verify, + enable_whole_model_verify, + denylisted_ops, + denylisted_nodes, + enable_variable_quantization, + disable_per_channel_for_dense_layers, + debug_options_str, + ) + + +@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY) +def mlir_sparsify(input_data_str): + """Sparsify `input_data_str` to encode sparse tensor with proper format. + + Args: + input_data_str: Input data in serialized form (e.g. a TFLITE model). + + Returns: + Sparsified model in serialized form (e.g. a TFLITE model). + """ + return wrap_converter.wrapped_experimental_mlir_sparsify(input_data_str) + + +def register_custom_opdefs(custom_opdefs_list): + """Register the given custom opdefs to the TensorFlow global op registry. + + Args: + custom_opdefs_list: String representing the custom ops OpDefs that are + included in the GraphDef. + + Returns: + True if the registration is successfully completed. + """ + return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list) + + +def convert( + model_flags: _model_flags_pb2.ModelFlags, + conversion_flags: _conversion_flags_pb2.ConverterFlags, + input_data_str: Optional[str] = None, + debug_info_str: Optional[str] = None, + enable_mlir_converter: bool = True, +): + """Converts `input_data_str` to a TFLite model. + + Args: + model_flags: Proto describing model properties, see `model_flags.proto`. + conversion_flags: Proto describing conversion properties, see + `compiler/mlir/lite/converter_flags.proto`. + input_data_str: Input data in serialized form (e.g. a graphdef is common, or + it can be hlo text or proto) + debug_info_str: Serialized `GraphDebugInfo` proto describing logging + information. + enable_mlir_converter: Enables MLIR-based conversion. 
+ + Returns: + Converted model in serialized form (e.g. a TFLITE model is common). + Raises: + ConverterError: When conversion fails in TFLiteConverter, usually due to + ops not being supported. + RuntimeError: When conversion fails, an exception is raised with the error + message embedded. + """ + # Historically, deprecated conversion failures would trigger a crash, so we + # attempt to run the converter out-of-process. The current MLIR conversion + # pipeline surfaces errors instead, and can be safely run in-process. + if enable_mlir_converter or not _deprecated_conversion_binary: + try: + return wrap_converter.wrapped_convert( + model_flags.SerializeToString(), + conversion_flags.SerializeToString(), + input_data_str, + debug_info_str, + enable_mlir_converter, + ) + except Exception as e: + converter_error = ConverterError(str(e)) + + for error_data in _metrics_wrapper.retrieve_collected_errors(): + converter_error.append_error(error_data) + # Seldom we encounter the case where an unsupported + # `StatefulPartitionedCallOp` is not inlined and remains in the final + # IR. If this occurs we can set `guarantee_all_funcs_one_use` and retry. + # This makes the converter copy functions definitions called by + # multiple StatefulPartitionedCall, thus allowing them to be properly + # inlined. 
+ if ( + error_data.error_code + == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR + and not conversion_flags.guarantee_all_funcs_one_use + ): + conversion_flags.guarantee_all_funcs_one_use = True + return convert( + model_flags, + conversion_flags, + input_data_str, + debug_info_str, + enable_mlir_converter, + ) + raise converter_error + + return _run_deprecated_conversion_binary( + model_flags.SerializeToString(), + conversion_flags.SerializeToString(), + input_data_str, + debug_info_str, + ) + + +@convert_phase( + Component.CONVERT_TF_TO_TFLITE_MODEL, + SubComponent.CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER, +) +def _run_deprecated_conversion_binary( + model_flags_str, conversion_flags_str, input_data_str, debug_info_str=None +): + """Convert `input_data_str` using deprecated conversion binary. + + Args: + model_flags_str: Serialized proto describing model properties, see + `model_flags.proto`. + conversion_flags_str: Serialized proto describing TFLite converter + properties, see `compiler/mlir/lite/converter_flags.proto`. + input_data_str: Input data in serialized form (e.g. a graphdef is common) + debug_info_str: Serialized `GraphDebugInfo` proto describing logging + information. (default None) + + Returns: + Converted model in serialized form (e.g. a TFLITE model is common). + Raises: + ConverterError: When cannot find the deprecated conversion binary. + RuntimeError: When conversion fails, an exception is raised with the error + message embedded. + """ + if distutils.spawn.find_executable(_deprecated_conversion_binary) is None: + raise ConverterError("""Could not find `toco_from_protos` binary, make sure +your virtualenv bin directory or pip local bin directory is in your path. +In particular, if you have installed TensorFlow with --user, make sure you +add the install directory to your path. 
+ +For example: +Linux: export PATH=$PATH:~/.local/bin/ +Mac: export PATH=$PATH:~/Library/Python//bin + +Alternative, use virtualenv.""") + # Windows and TemporaryFile are not that useful together, + # since you cannot have two readers/writers. So we have to + # make the temporaries and close and delete them explicitly. + conversion_filename: str = None + model_filename: str = None + input_filename: str = None + output_filename: str = None + try: + # Build all input files + with ( + _tempfile.NamedTemporaryFile(delete=False) as fp_conversion, + _tempfile.NamedTemporaryFile(delete=False) as fp_model, + _tempfile.NamedTemporaryFile(delete=False) as fp_input, + _tempfile.NamedTemporaryFile(delete=False) as fp_debug, + ): + conversion_filename = fp_conversion.name + input_filename = fp_input.name + model_filename = fp_model.name + debug_filename = fp_debug.name + + fp_model.write(model_flags_str) + fp_conversion.write(conversion_flags_str) + fp_input.write(input_data_str) + debug_info_str = debug_info_str if debug_info_str else "" + # if debug_info_str contains a "string value", then the call to + # fp_debug.write(debug_info_str) will fail with the following error + # + # TypeError: a bytes-like object is required, not 'str' + # + # Some of the subtests within the "convert_test" unit-test fail + # with the error shown above. 
So watch out for that scenario and + # convert debug_info_str to bytes where needed + if not isinstance(debug_info_str, bytes): + fp_debug.write(debug_info_str.encode("utf-8")) + else: + fp_debug.write(debug_info_str) + + # Reserve an output file + with _tempfile.NamedTemporaryFile(delete=False) as fp: + output_filename = fp.name + + # Run + cmd = [ + _deprecated_conversion_binary, + model_filename, + conversion_filename, + input_filename, + output_filename, + "--debug_proto_file={}".format(debug_filename), + ] + cmdline = " ".join(cmd) + is_windows = _platform.system() == "Windows" + proc = _subprocess.Popen( + cmdline, + shell=True, + stdout=_subprocess.PIPE, + stderr=_subprocess.STDOUT, + close_fds=not is_windows, + ) + stdout, stderr = proc.communicate() + exitcode = proc.returncode + if exitcode == 0: + with open(output_filename, "rb") as fp: + return fp.read() + else: + stdout = _try_convert_to_unicode(stdout) + stderr = _try_convert_to_unicode(stderr) + raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr)) + finally: + # Must manually cleanup files. + for filename in [ + conversion_filename, + input_filename, + model_filename, + output_filename, + ]: + try: + _os.unlink(filename) + except (OSError, TypeError): + pass + + +def build_model_flags( + change_concat_input_ranges=False, + allow_nonexistent_arrays=False, + saved_model_dir=None, + saved_model_version=0, + saved_model_tags=None, + saved_model_exported_names=None, + **_, +): + """Builds the model flags object from params. + + Args: + change_concat_input_ranges: Boolean to change behavior of min/max ranges for + inputs and outputs of the concat operator for quantized models. Changes + the ranges of concat operator overlap when true. (default False) + allow_nonexistent_arrays: Allow specifying array names that don't exist or + are unused in the final graph. (default False) + saved_model_dir: Filepath of the saved model to be converted. 
This value + will be non-empty only when the saved model import path will be used. + Otherwises, the graph def-based conversion will be processed. + saved_model_version: SavedModel file format version of The saved model file + to be converted. This value will be set only when the SavedModel import + path will be used. + saved_model_tags: Set of string saved model tags, formatted in the + comma-separated value. This value will be set only when the SavedModel + import path will be used. + saved_model_exported_names: Names to be exported (default: export all) when + the saved model import path is on. This value will be set only when the + SavedModel import path will be used. + + Returns: + model_flags: protocol buffer describing the model. + """ + model_flags = _model_flags_pb2.ModelFlags() + model_flags.change_concat_input_ranges = change_concat_input_ranges + model_flags.allow_nonexistent_arrays = allow_nonexistent_arrays + if saved_model_dir: + model_flags.saved_model_dir = saved_model_dir + model_flags.saved_model_version = saved_model_version + if saved_model_tags: + model_flags.saved_model_tags.extend(saved_model_tags) + if saved_model_exported_names: + model_flags.saved_model_exported_names.extend(saved_model_exported_names) + return model_flags + + +def build_conversion_flags( + inference_type=dtypes.float32, + inference_input_type=None, + input_format=lite_constants.TENSORFLOW_GRAPHDEF, + output_format=lite_constants.TFLITE, + default_ranges_stats=None, + drop_control_dependency=True, + reorder_across_fake_quant=False, + allow_custom_ops=False, + post_training_quantize=False, + quantize_to_float16=False, + dump_graphviz_dir=None, + dump_graphviz_video=False, + target_ops=None, + conversion_summary_dir=None, + select_user_tf_ops=None, + allow_all_select_tf_ops=False, + enable_tflite_resource_variables=True, + unfold_batchmatmul=False, + legalize_custom_tensor_list_ops=False, + lower_tensor_list_ops=True, + default_to_single_batch_in_tensor_list_ops=False, + 
accumulation_type=None, + allow_bfloat16=False, + unfold_large_splat_constant=False, + supported_backends=None, + disable_per_channel_quantization=False, + enable_mlir_dynamic_range_quantizer=False, + tf_quantization_mode=None, + disable_infer_tensor_range=False, + use_fake_quant_num_bits=False, + enable_dynamic_update_slice=False, + preserve_assert_op=False, + guarantee_all_funcs_one_use=False, + enable_mlir_variable_quantization=False, + disable_fuse_mul_and_fc=False, + quantization_options: Optional[quant_opts_pb2.QuantizationOptions] = None, + ir_dump_dir=None, + ir_dump_pass_regex=None, + ir_dump_func_regex=None, + enable_timing=None, + print_ir_before=None, + print_ir_after=None, + print_ir_module_scope=None, + elide_elementsattrs_if_larger=None, + quantization_config: Optional[ + quantization_config_pb2.QuantizationConfig + ] = None, + use_buffer_offset=False, + reduce_type_precision=False, + qdq_conversion_mode=None, + disable_per_channel_quantization_for_dense_layers=False, + enable_composite_direct_lowering=False, + model_origin_framework=lite_constants.UNSET, + canonicalizing_inf_as_min_max_float=True, + **_, +): + """Builds protocol buffer describing a conversion of a model. + + Typically this is to convert from TensorFlow GraphDef to TFLite, in which + case the default `input_format` and `output_format` are sufficient. + + Args: + inference_type: Data type of numeric arrays, excluding the input layer. + (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) + inference_input_type: Data type of the numeric arrays in the input layer. If + `inference_input_type` is in {tf.int8, tf.uint8}, then + `quantized_input_stats` must be provided. (default is the value assigned + to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8}) + input_format: Type of data to read. (default TENSORFLOW_GRAPHDEF, must be in + {TENSORFLOW_GRAPHDEF}) + output_format: Output file format. 
(default TFLITE, must be in {TFLITE, + GRAPHVIZ_DOT}) + default_ranges_stats: Tuple of integers representing (min, max) range values + for all arrays without a specified range. Intended for experimenting with + quantization via "dummy quantization". (default None) + drop_control_dependency: Boolean indicating whether to drop control + dependencies silently. This is due to TFLite not supporting control + dependencies. (default True) + reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant + nodes in unexpected locations. Used when the location of the FakeQuant + nodes is preventing graph transformations necessary to convert the graph. + Results in a graph that differs from the quantized training graph, + potentially causing differing arithmetic behavior. (default False) + allow_custom_ops: Boolean indicating whether to allow custom operations. + When false any unknown operation is an error. When true, custom ops are + created for any op that is unknown. The developer will need to provide + these to the TensorFlow Lite runtime with a custom resolver. (default + False) + post_training_quantize: Boolean indicating whether to quantize the weights + of the converted float model. Model size will be reduced and there will be + latency improvements (at the cost of accuracy). (default False) If + quantization_options is set, all quantization arg will be ignored. + quantize_to_float16: Boolean indicating whether to convert float buffers to + float16. (default False) + dump_graphviz_dir: Full filepath of folder to dump the graphs at various + stages of processing GraphViz .dot files. Preferred over + --output_format=GRAPHVIZ_DOT in order to keep the requirements of the + output file. (default None) + dump_graphviz_video: Boolean indicating whether to dump the graph after + every graph transformation. (default False) + target_ops: Experimental flag, subject to change. Set of OpsSet options + indicating which converter to use. 
(default set([OpsSet.TFLITE_BUILTINS])) + conversion_summary_dir: A string, the path to the generated conversion logs. + select_user_tf_ops: List of user's defined TensorFlow ops need to be + supported in the TensorFlow Lite runtime. These ops will be supported as + select TensorFlow ops. + allow_all_select_tf_ops: If True, automatically add all TF ops (including + custom TF ops) to the converted model as flex ops. + enable_tflite_resource_variables: Experimental flag, subject to change. + Enables conversion of resource variables. (default False) + unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of + tfl.fully_connected ops. If not, translate to tfl.batch_matmul. + legalize_custom_tensor_list_ops: Whether to legalize `tf.TensorList*` ops to + tfl custom if they can all be supported. + lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If + not, use Flex tensor list ops. + default_to_single_batch_in_tensor_list_ops: Whether to force to use batch + size one when the tensor list ops has the unspecified batch size. + accumulation_type: Data type of the accumulators in quantized inference. + Typically used for float16 quantization and is either fp16 or fp32. + allow_bfloat16: Whether the converted model supports reduced precision + inference with the bfloat16 type. + unfold_large_splat_constant: Whether to unfold large splat constant tensors + in the flatbuffer model to reduce size. + supported_backends: List of TFLite backends which needs to check + compatibility. + disable_per_channel_quantization: Disable per-channel quantized weights for + dynamic range quantization. Only per-tensor quantization will be used. + enable_mlir_dynamic_range_quantizer: Enable MLIR dynamic range quantization. + If False, the old converter dynamic range quantizer is used. + tf_quantization_mode: Indicates the mode of TF Quantization when the output + model is used for TF Quantization. + disable_infer_tensor_range: Disable infering tensor ranges. 
+ use_fake_quant_num_bits: Allow quantization parameters to be calculated from + num_bits attribute. + enable_dynamic_update_slice: Enable to convert to DynamicUpdateSlice op. + (default: False). + preserve_assert_op: Whether to preserve `TF::AssertOp` (default: False). + guarantee_all_funcs_one_use: Whether to clone functions so that each + function only has a single use. This option will be helpful if the + conversion fails when the `PartitionedCall` or `StatefulPartitionedCall` + can't be properly inlined (default: False). + enable_mlir_variable_quantization: Enable MLIR variable quantization. There + is a variable freezing pass, but some variables may not be fully frozen by + it. This flag enables quantization of those residual variables in the MLIR + graph. + disable_fuse_mul_and_fc: Disable fusing input multiplication with + fullyconnected operations. Useful when quantizing weights. + quantization_options: [Deprecated] Config to indicate quantization options + of each components (ex: weight, bias, activation). This can be a preset + method or a custom method, and allows finer, modular control. This option + will override any other existing quantization flags. We plan on gradually + migrating all quantization-related specs into this option. + ir_dump_dir: A string specifying the target directory to output MLIR dumps + produced during conversion. If populated, enables MLIR dumps. + ir_dump_pass_regex: A string containing a regular expression for filtering + the pass names to be dumped. Effective only if `ir_dump_dir` is populated. + ir_dump_func_regex: A string containing a regular expression for filtering + the function names to be dumped. Effective only if `ir_dump_dir` is + populated. + enable_timing: A boolean, if set to true reports the execution time of each + MLIR pass. + print_ir_before: A string containing a regular expression. If specified, + prints MLIR before passes which match. + print_ir_after: A string containing a regular expression. 
If specified, + prints MLIR after passes which match. + print_ir_module_scope: A boolean, if set to true always print the top-level + operation when printing IR for print_ir_[before|after]. + elide_elementsattrs_if_larger: An int, if specified elides ElementsAttrs + with '...' that have more elements than the given upper limit. + quantization_config: Configures the StableHLO Quantizer. See the comments in + `QuantizationConfig` protobuf definition for details. + use_buffer_offset: Force the model use buffer_offset & buffer_size fields + instead of data. i.e. store the constant tensor and custom op binaries + outside of Flatbuffers + reduce_type_precision: Convert some tensor types to a lower precision if all + values within that tensor are within the range of the lower precision. + This could have side effects e.g. reduced flatbuffer size. + qdq_conversion_mode: If set, assume input model is a quantized model + represented with QDQ ops and convert to quantized kernels. + disable_per_channel_quantization_for_dense_layers: If set, disables per + channel end enables per tensor integer quantization for weights in Dense + layers. The flag works only for integer quantized model. + enable_composite_direct_lowering: If set, attempts to lower composite ops + directly to tflite ops. + model_origin_framework: A str specifying the framework of the original + model. Can be {TENSORFLOW, KERAS, JAX, PYTORCH} + canonicalizing_inf_as_min_max_float: When set to true, convert +Inf/-Inf to + MIN/MAX float value and output of converter only contains finite values. + + Returns: + conversion_flags: protocol buffer describing the conversion process. + Raises: + ValueError, if the input tensor type is unknown. 
+ """ + conversion_flags = _conversion_flags_pb2.ConverterFlags() + conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type( + inference_type, usage="inference_type flag" + ) + if inference_input_type: + conversion_flags.inference_input_type = ( + convert_inference_tf_type_to_tflite_type( + inference_input_type, usage="inference_input_type flag" + ) + ) + else: + conversion_flags.inference_input_type = conversion_flags.inference_type + conversion_flags.input_format = input_format + conversion_flags.output_format = output_format + if default_ranges_stats: + conversion_flags.default_ranges_min = default_ranges_stats[0] + conversion_flags.default_ranges_max = default_ranges_stats[1] + conversion_flags.drop_control_dependency = drop_control_dependency + conversion_flags.reorder_across_fake_quant = reorder_across_fake_quant + conversion_flags.allow_custom_ops = allow_custom_ops + conversion_flags.post_training_quantize = post_training_quantize + conversion_flags.quantize_to_float16 = quantize_to_float16 + if dump_graphviz_dir: + conversion_flags.dump_graphviz_dir = dump_graphviz_dir + conversion_flags.dump_graphviz_include_video = dump_graphviz_video + if target_ops: + if OpsSet.SELECT_TF_OPS in target_ops: + conversion_flags.enable_select_tf_ops = True + if set(target_ops) == {OpsSet.SELECT_TF_OPS}: + conversion_flags.force_select_tf_ops = True + if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops: + conversion_flags.convert_to_stablehlo = True + if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops and len(target_ops) > 1: + raise ValueError( + "StableHLO Ops set can not be specified with other Ops set together" + ) + if conversion_summary_dir: + conversion_flags.conversion_summary_dir = conversion_summary_dir + if select_user_tf_ops: + conversion_flags.select_user_tf_ops.extend(select_user_tf_ops) + conversion_flags.allow_all_select_tf_ops = allow_all_select_tf_ops + conversion_flags.enable_tflite_resource_variables = ( + 
enable_tflite_resource_variables + ) + conversion_flags.unfold_batchmatmul = unfold_batchmatmul + conversion_flags.legalize_custom_tensor_list_ops = ( + legalize_custom_tensor_list_ops + ) + conversion_flags.lower_tensor_list_ops = lower_tensor_list_ops + conversion_flags.default_to_single_batch_in_tensor_list_ops = ( + default_to_single_batch_in_tensor_list_ops + ) + if accumulation_type: + conversion_flags.accumulation_type = convert_tensor_tf_type_to_tflite_type( + accumulation_type, usage="accumulation_type flag" + ) + conversion_flags.allow_bfloat16 = allow_bfloat16 + conversion_flags.unfold_large_splat_constant = unfold_large_splat_constant + if supported_backends: + conversion_flags.supported_backends.extend(supported_backends) + conversion_flags.disable_per_channel_quantization = ( + disable_per_channel_quantization + ) + conversion_flags.enable_mlir_dynamic_range_quantizer = ( + enable_mlir_dynamic_range_quantizer + ) + conversion_flags.enable_dynamic_update_slice = enable_dynamic_update_slice + conversion_flags.preserve_assert_op = preserve_assert_op + conversion_flags.guarantee_all_funcs_one_use = guarantee_all_funcs_one_use + if tf_quantization_mode: + conversion_flags.tf_quantization_mode = tf_quantization_mode + conversion_flags.disable_infer_tensor_range = disable_infer_tensor_range + conversion_flags.use_fake_quant_num_bits = use_fake_quant_num_bits + conversion_flags.enable_mlir_variable_quantization = ( + enable_mlir_variable_quantization + ) + conversion_flags.disable_fuse_mul_and_fc = disable_fuse_mul_and_fc + if quantization_options: # Deprecated + conversion_flags.quantization_options.CopyFrom(quantization_options) + if quantization_config: + conversion_flags.quantization_config.CopyFrom(quantization_config) + + # Transfer debug options. Check for existence before populating in order to + # leverage defaults specified in proto definition. + # TODO: b/319329480 - Match the debug_options fields with the user-facing + # flags. 
  # --- Tail of build_conversion_flags (definition starts earlier in the file).
  # Debug options are only populated when explicitly provided so that the
  # defaults declared in the proto definition remain in effect.
  if ir_dump_dir is not None:
    conversion_flags.debug_options.ir_dump_dir = ir_dump_dir
  if ir_dump_pass_regex is not None:
    conversion_flags.debug_options.ir_dump_pass_regex = ir_dump_pass_regex
  if ir_dump_func_regex is not None:
    conversion_flags.debug_options.ir_dump_func_regex = ir_dump_func_regex
  if enable_timing is not None:
    conversion_flags.debug_options.enable_timing = enable_timing
  if print_ir_before is not None:
    conversion_flags.debug_options.print_ir_before = print_ir_before
  if print_ir_after is not None:
    conversion_flags.debug_options.print_ir_after = print_ir_after
  if print_ir_module_scope is not None:
    conversion_flags.debug_options.print_ir_module_scope = print_ir_module_scope
  if elide_elementsattrs_if_larger is not None:
    conversion_flags.debug_options.elide_elementsattrs_if_larger = (
        elide_elementsattrs_if_larger
    )

  if use_buffer_offset is not None:
    conversion_flags.use_buffer_offset = use_buffer_offset
  if reduce_type_precision is not None:
    conversion_flags.reduce_type_precision = reduce_type_precision
  if qdq_conversion_mode is not None:
    conversion_flags.qdq_conversion_mode = qdq_conversion_mode
  conversion_flags.disable_per_channel_quantization_for_dense_layers = (
      disable_per_channel_quantization_for_dense_layers
  )
  conversion_flags.enable_composite_direct_lowering = (
      enable_composite_direct_lowering
  )
  # Maps the string framework name (e.g. "TENSORFLOW") to the proto enum value;
  # raises ValueError inside Value() for unknown names.
  conversion_flags.model_origin_framework = (
      _conversion_flags_pb2.ConverterFlags.ModelOriginFramework.Value(
          model_origin_framework
      )
  )
  conversion_flags.canonicalizing_inf_as_min_max_float = (
      canonicalizing_inf_as_min_max_float
  )
  return conversion_flags


@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
)
def convert_graphdef_with_arrays(
    input_data,
    input_arrays_with_shape,
    output_arrays,
    control_output_arrays,
    **kwargs,
):
  """Convert a frozen GraphDef that can't be loaded in TF.

  Conversion can be customized by providing arguments that are forwarded to
  `build_model_flags` and `build_conversion_flags` (see documentation).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes (e.g., [("foo", [1, 16,
      16, 3])]). Use only when graph cannot be loaded into TensorFlow and when
      `input_tensors` is None.
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
    control_output_arrays: Control output node names. This is used when
      converting a Graph with no output tensors. For example, if the graph's
      last operation is a Print op, just specify that op's name in this field.
      This can be used together with the `output_arrays` parameter.
    **kwargs: See `build_model_flags` and `build_conversion_flags`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_conversion_flags`.
  """
  model_flags = build_model_flags(**kwargs)
  conversion_flags = build_conversion_flags(**kwargs)
  enable_mlir_converter = kwargs.get("enable_mlir_converter", True)
  quantized_input_stats = kwargs.get("quantized_input_stats", None)

  # Input names and shapes are supplied explicitly because the graph cannot be
  # loaded to infer them.
  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    if _is_quantized_input_stats_required(conversion_flags):
      if quantized_input_stats:
        input_array.mean_value, input_array.std_value = quantized_input_stats[
            idx
        ]
      else:
        # Unlike convert_graphdef below (which only warns), missing stats are
        # a hard error on this path.
        raise ValueError(
            "The `quantized_input_stats` flag must be defined when either "
            "`inference_type` flag or `inference_input_type` flag is set to "
            "tf.int8 or tf.uint8."
        )
    input_array.name = name
    input_array.shape.dims.extend(list(map(int, shape)))

  if output_arrays:
    for name in output_arrays:
      model_flags.output_arrays.append(name)
  if control_output_arrays:
    for name in control_output_arrays:
      model_flags.control_output_arrays.append(name)

  data = convert(
      model_flags,
      conversion_flags,
      input_data.SerializeToString(),
      debug_info_str=None,
      enable_mlir_converter=enable_mlir_converter,
  )
  return data


@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
)
def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):
  """Convert a frozen GraphDef model using the TF Lite converter.

  Conversion can be customized by providing arguments that are forwarded to
  `build_model_flags` and `build_conversion_flags` (see documentation).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    **kwargs: See `build_model_flags` and `build_conversion_flags`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_conversion_flags`.
  """
  model_flags = build_model_flags(**kwargs)
  conversion_flags = build_conversion_flags(**kwargs)
  saved_model_dir = kwargs.get("saved_model_dir", None)
  input_shapes = kwargs.get("input_shapes", None)
  enable_mlir_converter = kwargs.get("enable_mlir_converter", True)
  quantized_input_stats = kwargs.get("quantized_input_stats", None)
  debug_info = kwargs.get("debug_info", None)

  for idx, input_tensor in enumerate(input_tensors):
    input_array = model_flags.input_arrays.add()
    if saved_model_dir:
      # SavedModel tensors keep their fully qualified name (e.g. "x:0");
      # otherwise the op-level name is extracted by the util helper.
      input_array.name = input_tensor.name
    else:
      input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = convert_tensor_tf_type_to_tflite_type(
        input_tensor.dtype, usage="input type of the TensorFlow model"
    )

    if _is_quantized_input_stats_required(conversion_flags):
      if quantized_input_stats:
        input_array.mean_value, input_array.std_value = quantized_input_stats[
            idx
        ]
      else:
        # We should ideally raise an error here, but we don't as it would break
        # several models/projects that depend on this workflow.
        warnings.warn(
            "Statistics for quantized inputs were expected, but not "
            "specified; continuing anyway."
        )

    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]

    if shape.rank is not None:
      # Create shapes with -1 for unknown dimensions.
      dims = []
      for dim in shape:
        if dim is None or (
            isinstance(dim, tensor_shape.Dimension) and dim.value is None
        ):
          dims.append(-1)
        else:
          dims.append(int(dim))
      input_array.shape.dims.extend(dims)
      input_array.shape.unknown_rank = False
    else:
      input_array.shape.unknown_rank = True

  for output_tensor in output_tensors:
    if saved_model_dir:
      model_flags.output_arrays.append(output_tensor.name)
    else:
      model_flags.output_arrays.append(util.get_tensor_name(output_tensor))

  data = convert(
      model_flags,
      conversion_flags,
      input_data.SerializeToString(),
      debug_info_str=debug_info.SerializeToString() if debug_info else None,
      enable_mlir_converter=enable_mlir_converter,
  )
  return data


@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL
)
def convert_saved_model(**kwargs):
  """Converts a SavedModel using TF Lite converter."""
  model_flags = build_model_flags(**kwargs)
  conversion_flags = build_conversion_flags(**kwargs)
  # The SavedModel location travels inside model_flags, so no serialized
  # input graph is passed here.
  data = convert(
      model_flags,
      conversion_flags,
      input_data_str=None,
      debug_info_str=None,
      enable_mlir_converter=True,
  )
  return data


@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_JAX_HLO
)
def convert_jax_hlo(input_content, input_names, is_proto_format, **kwargs):
  """Converts a Jax hlo-based model using TFLite converter."""
  model_flags = _model_flags_pb2.ModelFlags()
  model_flags.use_hlo_import = True
  if is_proto_format:
    model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_PROTO
  else:
    model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_TEXT

  # Build input names.
  for input_name in input_names:
    input_array = model_flags.input_arrays.add()
    input_array.name = input_name

  conversion_flags = build_conversion_flags(**kwargs)
  data = convert(
      model_flags,
      conversion_flags,
      input_data_str=input_content,
      debug_info_str=None,
      enable_mlir_converter=True,
  )
  return data


@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Convert a TensorFlow GraphDef to TFLite.

  This function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.
  Conversion can be customized by providing arguments that are forwarded to
  `build_model_flags` and `build_conversion_flags` (see documentation for
  details).
  Args:
    input_data: Input data (i.e. often `sess.graph_def`).
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_model_flags` and `build_conversion_flags`.
    **kwargs: See `build_model_flags` and `build_conversion_flags`.

  Returns:
    The converted TensorFlow Lite model in a bytes array.

  Raises:
    Defined in `convert`.
  """
  # Legacy entry point defaults to the pre-MLIR converter, unlike the
  # convert_graphdef* paths above which default to True.
  kwargs["enable_mlir_converter"] = kwargs.get("enable_mlir_converter", False)
  return convert_graphdef(
      input_data, input_tensors, output_tensors, *args, **kwargs
  )


def deduplicate_readonly_buffers(tflite_model):
  """Generates a new model byte array after deduplicating readonly buffers.

  This function should be invoked after the model optimization toolkit. The
  model optimization toolkit assumes that each tensor object owns its each
  buffer separately.

  Args:
    tflite_model: TFLite flatbuffer in a byte array to be deduplicated.

  Returns:
    TFLite flatbuffer in a bytes array, processed with the deduplication method.
  """
  # Load TFLite Flatbuffer byte array into an object.
  model = flatbuffer_utils.convert_bytearray_to_object(tflite_model)

  # Get all the read-only buffers, which can be modified without causing any
  # issue in the graph invocation stage.
  read_only_buffer_indices = set()
  for subgraph in model.subgraphs:
    # To get all the read-only buffers:
    # (1) Get all read-only input tensors.
    # (2) Discard intermediate or output tensors.
    # (3) Discard the subgraph's input/output tensors.
    # (4) Gather the buffers of the read-only input tensors.

    # (1) Get read-only input tensors.
    read_only_input_tensor_indices = set()
    for op in subgraph.operators:
      if op.inputs is None:
        continue
      for i, input_tensor_idx in enumerate(op.inputs):
        # Ignore mutable tensors.
        if op.mutatingVariableInputs is not None:
          # Ignore invalid tensors.
          if (
              i < len(op.mutatingVariableInputs)
              and op.mutatingVariableInputs[i]
          ):
            continue
        # Ignore variable tensors.
        if subgraph.tensors[input_tensor_idx].isVariable:
          continue
        read_only_input_tensor_indices.add(input_tensor_idx)

    # (2) Discard intermediate or output tensors.
    for op in subgraph.operators:
      if op.outputs is not None:
        for output_tensor_idx in op.outputs:
          read_only_input_tensor_indices.discard(output_tensor_idx)
      if op.intermediates is not None:
        for intermediate_tensor_idx in op.intermediates:
          read_only_input_tensor_indices.discard(intermediate_tensor_idx)

    # (3) Discard the subgraph's input and output tensors.
    if subgraph.inputs is not None:
      for input_tensor_idx in subgraph.inputs:
        read_only_input_tensor_indices.discard(input_tensor_idx)
    if subgraph.outputs is not None:
      for output_tensor_idx in subgraph.outputs:
        read_only_input_tensor_indices.discard(output_tensor_idx)

    # (4) Gather the buffers of the read-only input tensors.
    for tensor_idx in read_only_input_tensor_indices:
      read_only_buffer_indices.add(subgraph.tensors[tensor_idx].buffer)

  # Ignore invalid negative index or zero-sized buffers.
  # NOTE(review): the isinstance(..., list) test presumably filters buffers
  # whose data was not loaded as a numpy array (.size below requires ndarray)
  # -- confirm against flatbuffer_utils.
  for buffer_idx in read_only_buffer_indices.copy():
    if buffer_idx < 0 or (
        model.buffers[buffer_idx].data is None
        or isinstance(model.buffers[buffer_idx].data, list)
        or model.buffers[buffer_idx].data.size == 0
    ):
      read_only_buffer_indices.discard(buffer_idx)

  class BufferIndex:
    """A class to store index, size, hash of the buffers in TFLite model."""

    def __init__(self, idx, size, hash_value):
      self.idx = idx
      self.size = size
      self.hash_value = hash_value

  # md5 is used only as a content fingerprint for dedup, not for security.
  read_only_buffers = list(
      map(
          lambda index: BufferIndex(  # pylint: disable=g-long-lambda
              index,
              model.buffers[index].data.size,
              hashlib.md5(model.buffers[index].data.data.tobytes()).hexdigest(),
          ),
          read_only_buffer_indices,
      )
  )

  # Sort read_only_buffers by buffer size & hash in descending order.
  read_only_buffers = sorted(
      read_only_buffers,
      key=lambda buffer: (buffer.size, buffer.hash_value),
      reverse=True,
  )

  # Create a map of duplicate buffers (same size and same type).
  # eg: In [1, 2, 3, 4, 5, 6] if (1, 4, 6) and (2, 5) are each, groups of buffer
  # indices of the same size and type, then the map would be {4:1, 6:1, 5:2}
  duplicate_buffer_map = {}
  for i, buffer_i in enumerate(read_only_buffers):
    # This buffer is a duplicate.
    if buffer_i.idx in duplicate_buffer_map:
      continue
    # This buffer is unique. Scan rest of the list to find duplicates
    # of this buffer and mark them accordingly.
    for buffer_j in read_only_buffers[i + 1:]:
      if buffer_j.idx in duplicate_buffer_map:
        continue
      # The list is sorted by size, so a size mismatch means no further
      # entries can match: break instead of continue.
      if buffer_i.size != buffer_j.size:
        break
      if buffer_i.hash_value != buffer_j.hash_value:
        continue
      # Found duplicate. Nullify j-th buffer and use i-th buffer instead.
      duplicate_buffer_map[buffer_j.idx] = buffer_i.idx

  # Make the duplicated tensors use the single shared buffer index.
  for subgraph in model.subgraphs:
    for op in subgraph.operators:
      if op.inputs is None:
        continue
      for input_tensor in op.inputs:
        buffer_idx = subgraph.tensors[input_tensor].buffer
        if buffer_idx in duplicate_buffer_map:
          subgraph.tensors[input_tensor].buffer = duplicate_buffer_map[
              buffer_idx
          ]

  # Nullify the unused buffers.
  for idx in duplicate_buffer_map:
    model.buffers[idx].data = None

  # Return a TFLite flatbuffer as a byte array.
  return flatbuffer_utils.convert_object_to_bytearray(model)
def get_meta_graph_def(saved_model_dir, tag_set):
  """Validate saved_model and extract MetaGraphDef.

  Args:
    saved_model_dir: saved_model path to convert.
    tag_set: Set of tag(s) of the MetaGraphDef to load.

  Returns:
    The meta_graph_def used for tflite conversion.

  Raises:
    ValueError: No valid MetaGraphDef for given tag_set.
  """
  # Loading in a throwaway session both validates the SavedModel and yields
  # the MetaGraphDef matching tag_set.
  with session.Session(graph=ops.Graph()) as load_sess:
    return loader.load(load_sess, tag_set, saved_model_dir)


def get_signature_def(meta_graph, signature_key):
  """Get the signature def from meta_graph with given signature_key.

  Args:
    meta_graph: meta_graph_def.
    signature_key: signature_def in the meta_graph_def.

  Returns:
    The signature_def used for tflite conversion.

  Raises:
    ValueError: Given signature_key is not valid for this meta_graph.
  """
  def_map = meta_graph.signature_def
  available_keys = set(def_map.keys())
  logging.info(
      "The given SavedModel MetaGraphDef contains SignatureDefs with the "
      "following keys: %s", available_keys)
  if signature_key in available_keys:
    return def_map[signature_key]
  raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
                   "values are '{}'.".format(signature_key,
                                             ",".join(available_keys)))


def get_inputs_outputs(signature_def):
  """Get inputs and outputs from SignatureDef.

  Args:
    signature_def: SignatureDef in the meta_graph_def for conversion.

  Returns:
    The inputs and outputs in the graph for conversion.
  """

  def _tensor_names(tensor_info_map):
    # Preserve the map's own key order when collecting tensor names.
    return [tensor_info_map[key].name for key in tensor_info_map]

  return (_tensor_names(signature_def.inputs),
          _tensor_names(signature_def.outputs))


def _get_tensors(graph, signature_def_tensor_names=None,
                 user_tensor_names=None):
  """Gets the tensors associated with the tensor names.

  Either signature_def_tensor_names or user_tensor_names should be provided. If
  the user provides tensors, the tensors associated with the user provided
  tensor names are provided. Otherwise, the tensors associated with the names in
  the SignatureDef are provided.

  Args:
    graph: GraphDef representing graph.
    signature_def_tensor_names: Tensor names stored in either the inputs or
      outputs of a SignatureDef. (default None)
    user_tensor_names: Tensor names provided by the user. (default None)

  Returns:
    List of tensors.

  Raises:
    ValueError:
      signature_def_tensors and user_tensor_names are undefined or empty.
      user_tensor_names are not valid.
  """
  # User-provided names take priority over the SignatureDef names; both are
  # processed in sorted order.
  if user_tensor_names:
    return util.get_tensors_from_tensor_names(graph,
                                              sorted(user_tensor_names))
  if signature_def_tensor_names:
    return [
        graph.get_tensor_by_name(name)
        for name in sorted(signature_def_tensor_names)
    ]
  # Both sources are undefined or empty.
  raise ValueError(
      "Specify either signature_def_tensor_names or user_tensor_names")
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_SAVED_MODEL)
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
                       output_arrays, tag_set, signature_key):
  """Converts a SavedModel to a frozen graph.

  Args:
    saved_model_dir: SavedModel directory to convert.
    input_arrays: List of input tensors to freeze graph with. Uses input arrays
      from SignatureDef when none are provided.
    input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
      Automatically determined when input shapes is None (e.g., {"foo" : None}).
    output_arrays: List of output tensors to freeze graph with. Uses output
      arrays from SignatureDef when none are provided.
    tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
      analyze. All tags in the tag set must be present.
    signature_key: Key identifying SignatureDef containing inputs and outputs.

  Returns:
    frozen_graph_def: Frozen GraphDef.
    in_tensors: List of input tensors for the graph.
    out_tensors: List of output tensors for the graph.
    graph: `Graph` object.

  Raises:
    ValueError:
      SavedModel doesn't contain a MetaGraphDef identified by tag_set.
      signature_key is not in the MetaGraphDef.
      assets/ directory is in the MetaGraphDef.
      input_shapes does not match the length of input_arrays.
      input_arrays or output_arrays are not valid.
  """
  # Read SignatureDef.
  meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
  signature_def = get_signature_def(meta_graph, signature_key)
  inputs, outputs = get_inputs_outputs(signature_def)

  # Check SavedModel for assets directory.
  collection_def = meta_graph.collection_def
  if constants.ASSETS_KEY in collection_def:
    raise ValueError("SavedModels with assets/ directory are not supported.")

  # Load the model again into a fresh graph/session so freezing operates on a
  # clean graph rather than the validation session used above.
  graph = ops.Graph()
  with session.Session(graph=graph) as sess:
    loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)

    # Gets input and output tensors.
    # TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
    # User-supplied input_arrays/output_arrays override the SignatureDef names
    # inside _get_tensors.
    in_tensors = _get_tensors(graph, inputs, input_arrays)
    out_tensors = _get_tensors(graph, outputs, output_arrays)
    util.set_tensor_shapes(in_tensors, input_shapes)

    frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)
    return frozen_graph_def, in_tensors, out_tensors, sess.graph
# ==============================================================================
"""Python TF-Lite interpreter."""
import ctypes
import enum
import os
import platform
import sys

import numpy as np

# pylint: disable=g-import-not-at-top
if not os.path.splitext(__file__)[0].endswith(
    os.path.join('tflite_runtime', 'interpreter')):
  # This file is part of tensorflow package.
  from tensorflow.lite.python.interpreter_wrapper import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper
  from tensorflow.lite.python.metrics import metrics
  from tensorflow.python.util.tf_export import tf_export as _tf_export
else:
  # This file is part of tflite_runtime package.
  from tflite_runtime import _pywrap_tensorflow_interpreter_wrapper as _interpreter_wrapper
  from tflite_runtime import metrics_portable as metrics

  # In the tflite_runtime package there is no tf_export machinery; this no-op
  # decorator keeps the module body identical for both packagings.
  def _tf_export(*x, **kwargs):
    del x, kwargs
    return lambda x: x


# pylint: enable=g-import-not-at-top


class Delegate:
  """Python wrapper class to manage TfLiteDelegate objects.

  The shared library is expected to have two functions,
  tflite_plugin_create_delegate and tflite_plugin_destroy_delegate,
  which should implement the API specified in
  tensorflow/lite/delegates/external/external_delegate_interface.h.
  """

  def __init__(self, library, options=None):
    """Loads delegate from the shared library.

    Args:
      library: Shared library name.
      options: Dictionary of options that are required to load the delegate. All
        keys and values in the dictionary should be serializable. Consult the
        documentation of the specific delegate for required and legal options.
        (default None)

    Raises:
      RuntimeError: This is raised if the Python implementation is not CPython.
    """

    # TODO(b/136468453): Remove need for __del__ ordering needs of CPython
    # by using explicit closes(). See implementation of Interpreter __del__.
    if platform.python_implementation() != 'CPython':
      raise RuntimeError('Delegates are currently only supported into CPython'
                         'due to missing immediate reference counting.')

    # pydll (not cdll): the create function may call back into Python via the
    # error-reporting callback, so the GIL must be held during the call.
    self._library = ctypes.pydll.LoadLibrary(library)
    self._library.tflite_plugin_create_delegate.argtypes = [
        ctypes.POINTER(ctypes.c_char_p),
        ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
        ctypes.CFUNCTYPE(None, ctypes.c_char_p)
    ]
    # The return type is really 'TfLiteDelegate*', but 'void*' is close enough.
    self._library.tflite_plugin_create_delegate.restype = ctypes.c_void_p

    # Convert the options from a dictionary to lists of char pointers.
    options = options or {}
    options_keys = (ctypes.c_char_p * len(options))()
    options_values = (ctypes.c_char_p * len(options))()
    for idx, (key, value) in enumerate(options.items()):
      options_keys[idx] = str(key).encode('utf-8')
      options_values[idx] = str(value).encode('utf-8')

    class ErrorMessageCapture:
      # Accumulates any error text the native create function reports through
      # the callback, so a failure can be surfaced in the ValueError below.

      def __init__(self):
        self.message = ''

      def report(self, x):
        self.message += x if isinstance(x, str) else x.decode('utf-8')

    capture = ErrorMessageCapture()
    error_capturer_cb = ctypes.CFUNCTYPE(None, ctypes.c_char_p)(capture.report)
    # Do not make a copy of _delegate_ptr. It is freed by Delegate's finalizer.
    self._delegate_ptr = self._library.tflite_plugin_create_delegate(
        options_keys, options_values, len(options), error_capturer_cb)
    if self._delegate_ptr is None:
      raise ValueError(capture.message)

  def __del__(self):
    # __del__ can not be called multiple times, so if the delegate is destroyed.
    # don't try to destroy it twice.
    if self._library is not None:
      self._library.tflite_plugin_destroy_delegate.argtypes = [ctypes.c_void_p]
      self._library.tflite_plugin_destroy_delegate(self._delegate_ptr)
      # Setting _library to None marks this delegate as already destroyed.
      self._library = None

  def _get_native_delegate_pointer(self):
    """Returns the native TfLiteDelegate pointer.

    It is not safe to copy this pointer because it needs to be freed.

    Returns:
      TfLiteDelegate *
    """
    return self._delegate_ptr


@_tf_export('lite.experimental.load_delegate')
def load_delegate(library, options=None):
  """Returns loaded Delegate object.

  Example usage:

  ```
  import tensorflow as tf

  try:
    delegate = tf.lite.experimental.load_delegate('delegate.so')
  except ValueError:
    // Fallback to CPU

  if delegate:
    interpreter = tf.lite.Interpreter(
        model_path='model.tflite',
        experimental_delegates=[delegate])
  else:
    interpreter = tf.lite.Interpreter(model_path='model.tflite')
  ```

  This is typically used to leverage EdgeTPU for running TensorFlow Lite models.
  For more information see: https://coral.ai/docs/edgetpu/tflite-python/

  Args:
    library: Name of shared library containing the
      [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates).
    options: Dictionary of options that are required to load the delegate. All
      keys and values in the dictionary should be convertible to str. Consult
      the documentation of the specific delegate for required and legal options.
      (default None)

  Returns:
    Delegate object.

  Raises:
    ValueError: Delegate failed to load.
    RuntimeError: If delegate loading is used on unsupported platform.
  """
  try:
    delegate = Delegate(library, options)
  except ValueError as e:
    raise ValueError('Failed to load delegate from {}\n{}'.format(
        library, str(e)))
  return delegate


class SignatureRunner:
  """SignatureRunner class for running TFLite models using SignatureDef.

  This class should be instantiated through TFLite Interpreter only using
  get_signature_runner method on Interpreter.
  Example,
  signature = interpreter.get_signature_runner("my_signature")
  result = signature(input_1=my_input_1, input_2=my_input_2)
  print(result["my_output"])
  print(result["my_second_output"])
  All names used are this specific SignatureDef names.
+ + Notes: + No other function on this object or on the interpreter provided should be + called while this object call has not finished. + """ + + def __init__(self, interpreter=None, signature_key=None): + """Constructor. + + Args: + interpreter: Interpreter object that is already initialized with the + requested model. + signature_key: SignatureDef key to be used. + """ + if not interpreter: + raise ValueError('None interpreter provided.') + if not signature_key: + raise ValueError('None signature_key provided.') + self._interpreter = interpreter + self._interpreter_wrapper = interpreter._interpreter + self._signature_key = signature_key + signature_defs = interpreter._get_full_signature_list() + if signature_key not in signature_defs: + raise ValueError('Invalid signature_key provided.') + self._signature_def = signature_defs[signature_key] + self._outputs = self._signature_def['outputs'].items() + self._inputs = self._signature_def['inputs'] + + self._subgraph_index = ( + self._interpreter_wrapper.GetSubgraphIndexFromSignature( + self._signature_key)) + + def __call__(self, **kwargs): + """Runs the SignatureDef given the provided inputs in arguments. + + Args: + **kwargs: key,value for inputs to the model. Key is the SignatureDef input + name. Value is numpy array with the value. + + Returns: + dictionary of the results from the model invoke. + Key in the dictionary is SignatureDef output name. + Value is the result Tensor. 
+ """ + + if len(kwargs) != len(self._inputs): + raise ValueError( + 'Invalid number of inputs provided for running a SignatureDef, ' + 'expected %s vs provided %s' % (len(self._inputs), len(kwargs))) + + # Resize input tensors + for input_name, value in kwargs.items(): + if input_name not in self._inputs: + raise ValueError('Invalid Input name (%s) for SignatureDef' % + input_name) + self._interpreter_wrapper.ResizeInputTensor( + self._inputs[input_name], np.array(value.shape, dtype=np.int32), + False, self._subgraph_index) + # Allocate tensors. + self._interpreter_wrapper.AllocateTensors(self._subgraph_index) + # Set the input values. + for input_name, value in kwargs.items(): + self._interpreter_wrapper.SetTensor(self._inputs[input_name], value, + self._subgraph_index) + + self._interpreter_wrapper.Invoke(self._subgraph_index) + result = {} + for output_name, output_index in self._outputs: + result[output_name] = self._interpreter_wrapper.GetTensor( + output_index, self._subgraph_index) + return result + + def get_input_details(self): + """Gets input tensor details. + + Returns: + A dictionary from input name to tensor details where each item is a + dictionary with details about an input tensor. Each dictionary contains + the following fields that describe the tensor: + + + `name`: The tensor name. + + `index`: The tensor index in the interpreter. + + `shape`: The shape of the tensor. + + `shape_signature`: Same as `shape` for models with known/fixed shapes. + If any dimension sizes are unknown, they are indicated with `-1`. + + `dtype`: The numpy data type (such as `np.int32` or `np.uint8`). + + `quantization`: Deprecated, use `quantization_parameters`. This field + only works for per-tensor quantization, whereas + `quantization_parameters` works in all cases. + + `quantization_parameters`: A dictionary of parameters used to quantize + the tensor: + ~ `scales`: List of scales (one if per-tensor quantization). 
+ ~ `zero_points`: List of zero_points (one if per-tensor quantization). + ~ `quantized_dimension`: Specifies the dimension of per-axis + quantization, in the case of multiple scales/zero_points. + + `sparsity_parameters`: A dictionary of parameters used to encode a + sparse tensor. This is empty if the tensor is dense. + """ + result = {} + for input_name, tensor_index in self._inputs.items(): + result[input_name] = self._interpreter._get_tensor_details( # pylint: disable=protected-access + tensor_index, self._subgraph_index) + return result + + def get_output_details(self): + """Gets output tensor details. + + Returns: + A dictionary from input name to tensor details where each item is a + dictionary with details about an output tensor. The dictionary contains + the same fields as described for `get_input_details()`. + """ + result = {} + for output_name, tensor_index in self._outputs: + result[output_name] = self._interpreter._get_tensor_details( # pylint: disable=protected-access + tensor_index, self._subgraph_index) + return result + + +@_tf_export('lite.experimental.OpResolverType') +@enum.unique +class OpResolverType(enum.Enum): + """Different types of op resolvers for Tensorflow Lite. + + * `AUTO`: Indicates the op resolver that is chosen by default in TfLite + Python, which is the "BUILTIN" as described below. + * `BUILTIN`: Indicates the op resolver for built-in ops with optimized kernel + implementation. + * `BUILTIN_REF`: Indicates the op resolver for built-in ops with reference + kernel implementation. It's generally used for testing and debugging. + * `BUILTIN_WITHOUT_DEFAULT_DELEGATES`: Indicates the op resolver for + built-in ops with optimized kernel implementation, but it will disable + the application of default TfLite delegates (like the XNNPACK delegate) to + the model graph. Generally this should not be used unless there are issues + with the default configuration. + """ + # Corresponds to an op resolver chosen by default in TfLite Python. 
+ AUTO = 0 + + # Corresponds to tflite::ops::builtin::BuiltinOpResolver in C++. + BUILTIN = 1 + + # Corresponds to tflite::ops::builtin::BuiltinRefOpResolver in C++. + BUILTIN_REF = 2 + + # Corresponds to + # tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates in C++. + BUILTIN_WITHOUT_DEFAULT_DELEGATES = 3 + + +def _get_op_resolver_id(op_resolver_type=OpResolverType.AUTO): + """Get a integer identifier for the op resolver.""" + + # Note: the integer identifier value needs to be same w/ op resolver ids + # defined in interpreter_wrapper/interpreter_wrapper.cc. + return { + # Note AUTO and BUILTIN currently share the same identifier. + OpResolverType.AUTO: 1, + OpResolverType.BUILTIN: 1, + OpResolverType.BUILTIN_REF: 2, + OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES: 3 + }.get(op_resolver_type, None) + + +@_tf_export('lite.Interpreter') +class Interpreter: + """Interpreter interface for running TensorFlow Lite models. + + Models obtained from `TfLiteConverter` can be run in Python with + `Interpreter`. + + As an example, let's generate a simple Keras model and convert it to TFLite + (`TfLiteConverter` also supports other input formats with `from_saved_model` + and `from_concrete_function`) + + >>> x = np.array([[1.], [2.]]) + >>> y = np.array([[2.], [4.]]) + >>> model = tf.keras.models.Sequential([ + ... tf.keras.layers.Dropout(0.2), + ... tf.keras.layers.Dense(units=1, input_shape=[1]) + ... ]) + >>> model.compile(optimizer='sgd', loss='mean_squared_error') + >>> model.fit(x, y, epochs=1) + >>> converter = tf.lite.TFLiteConverter.from_keras_model(model) + >>> tflite_model = converter.convert() + + `tflite_model` can be saved to a file and loaded later, or directly into the + `Interpreter`. Since TensorFlow Lite pre-plans tensor allocations to optimize + inference, the user needs to call `allocate_tensors()` before any inference. 
+ + >>> interpreter = tf.lite.Interpreter(model_content=tflite_model) + >>> interpreter.allocate_tensors() # Needed before execution! + + Sample execution: + + >>> output = interpreter.get_output_details()[0] # Model has single output. + >>> input = interpreter.get_input_details()[0] # Model has single input. + >>> input_data = tf.constant(1., shape=[1, 1]) + >>> interpreter.set_tensor(input['index'], input_data) + >>> interpreter.invoke() + >>> interpreter.get_tensor(output['index']).shape + (1, 1) + + Use `get_signature_runner()` for a more user-friendly inference API. + """ + + def __init__( + self, + model_path=None, + model_content=None, + experimental_delegates=None, + num_threads=None, + experimental_op_resolver_type=OpResolverType.AUTO, + experimental_preserve_all_tensors=False, + experimental_disable_delegate_clustering=False, + experimental_default_delegate_latest_features=False, + ): + """Constructor. + + Args: + model_path: Path to TF-Lite Flatbuffer file. + model_content: Content of model. + experimental_delegates: Experimental. Subject to change. List of + [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates) + objects returned by lite.load_delegate(). + num_threads: Sets the number of threads used by the interpreter and + available to CPU kernels. If not set, the interpreter will use an + implementation-dependent default number of threads. Currently, only a + subset of kernels, such as conv, support multi-threading. num_threads + should be >= 1. + experimental_op_resolver_type: The op resolver used by the interpreter. It + must be an instance of OpResolverType. By default, we use the built-in + op resolver which corresponds to tflite::ops::builtin::BuiltinOpResolver + in C++. 
+ experimental_preserve_all_tensors: If true, then intermediate tensors used + during computation are preserved for inspection, and if the passed op + resolver type is AUTO or BUILTIN, the type will be changed to + BUILTIN_WITHOUT_DEFAULT_DELEGATES so that no Tensorflow Lite default + delegates are applied. If false, getting intermediate tensors could + result in undefined values or None, especially when the graph is + successfully modified by the Tensorflow Lite default delegate. + experimental_disable_delegate_clustering: If true, don't perform delegate + clustering during delegate graph partitioning phase. Disabling delegate + clustering will make the execution order of ops respect the + explicitly-inserted control dependencies in the graph (inserted via + `with tf.control_dependencies()`) since the TF Lite converter will drop + control dependencies by default. Most users shouldn't turn this flag to + True if they don't insert explicit control dependencies or the graph + execution order is expected. For automatically inserted control + dependencies (with `tf.Variable`, `tf.Print` etc), the user doesn't need + to turn this flag to True since they are respected by default. Note that + this flag is currently experimental, and it might be removed/updated if + the TF Lite converter doesn't drop such control dependencies in the + model. Default is False. + experimental_default_delegate_latest_features: If true, default delegates + may enable all flag protected features. Default is False; + + Raises: + ValueError: If the interpreter was unable to create. 
+ """ + if not hasattr(self, '_custom_op_registerers'): + self._custom_op_registerers = [] + + actual_resolver_type = experimental_op_resolver_type + if experimental_preserve_all_tensors and ( + experimental_op_resolver_type == OpResolverType.AUTO or + experimental_op_resolver_type == OpResolverType.BUILTIN): + actual_resolver_type = OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES + op_resolver_id = _get_op_resolver_id(actual_resolver_type) + if op_resolver_id is None: + raise ValueError('Unrecognized passed in op resolver type: {}'.format( + experimental_op_resolver_type)) + + if num_threads is not None: + if not isinstance(num_threads, int): + raise ValueError('type of num_threads should be int') + if num_threads < 1: + raise ValueError('num_threads should >= 1') + + if model_path and not model_content: + custom_op_registerers_by_name = [ + x for x in self._custom_op_registerers if isinstance(x, str) + ] + custom_op_registerers_by_func = [ + x for x in self._custom_op_registerers if not isinstance(x, str) + ] + self._interpreter = _interpreter_wrapper.CreateWrapperFromFile( + model_path, + op_resolver_id, + custom_op_registerers_by_name, + custom_op_registerers_by_func, + experimental_preserve_all_tensors, + experimental_disable_delegate_clustering, + int(num_threads or 1), + experimental_default_delegate_latest_features, + ) + if not self._interpreter: + raise ValueError('Failed to open {}'.format(model_path)) + elif model_content and not model_path: + custom_op_registerers_by_name = [ + x for x in self._custom_op_registerers if isinstance(x, str) + ] + custom_op_registerers_by_func = [ + x for x in self._custom_op_registerers if not isinstance(x, str) + ] + # Take a reference, so the pointer remains valid. + # Since python strings are immutable then PyString_XX functions + # will always return the same pointer. 
+ self._model_content = model_content + self._interpreter = _interpreter_wrapper.CreateWrapperFromBuffer( + model_content, + op_resolver_id, + custom_op_registerers_by_name, + custom_op_registerers_by_func, + experimental_preserve_all_tensors, + experimental_disable_delegate_clustering, + int(num_threads or 1), + experimental_default_delegate_latest_features, + ) + elif not model_content and not model_path: + raise ValueError('`model_path` or `model_content` must be specified.') + else: + raise ValueError('Can\'t both provide `model_path` and `model_content`') + + # Each delegate is a wrapper that owns the delegates that have been loaded + # as plugins. The interpreter wrapper will be using them, but we need to + # hold them in a list so that the lifetime is preserved at least as long as + # the interpreter wrapper. + self._delegates = [] + if experimental_delegates: + self._delegates = experimental_delegates + for delegate in self._delegates: + self._interpreter.ModifyGraphWithDelegate( + delegate._get_native_delegate_pointer()) # pylint: disable=protected-access + self._signature_defs = self.get_signature_list() + + self._metrics = metrics.TFLiteMetrics() + self._metrics.increase_counter_interpreter_creation() + + def __del__(self): + # Must make sure the interpreter is destroyed before things that + # are used by it like the delegates. NOTE this only works on CPython + # probably. + # TODO(b/136468453): Remove need for __del__ ordering needs of CPython + # by using explicit closes(). See implementation of Interpreter __del__. + self._interpreter = None + self._delegates = None + + def allocate_tensors(self): + self._ensure_safe() + return self._interpreter.AllocateTensors() + + def _safe_to_run(self): + """Returns true if there exist no numpy array buffers. + + This means it is safe to run tflite calls that may destroy internally + allocated memory. This works, because in the wrapper.cc we have made + the numpy base be the self._interpreter. 
+ """ + # NOTE, our tensor() call in cpp will use _interpreter as a base pointer. + # If this environment is the only _interpreter, then the ref count should be + # 2 (1 in self and 1 in temporary of sys.getrefcount). + return sys.getrefcount(self._interpreter) == 2 + + def _ensure_safe(self): + """Makes sure no numpy arrays pointing to internal buffers are active. + + This should be called from any function that will call a function on + _interpreter that may reallocate memory e.g. invoke(), ... + + Raises: + RuntimeError: If there exist numpy objects pointing to internal memory + then we throw. + """ + if not self._safe_to_run(): + raise RuntimeError("""There is at least 1 reference to internal data + in the interpreter in the form of a numpy array or slice. Be sure to + only hold the function returned from tensor() if you are using raw + data access.""") + + # Experimental and subject to change + def _get_op_details(self, op_index): + """Gets a dictionary with arrays of ids for tensors involved with an op. + + Args: + op_index: Operation/node index of node to query. + + Returns: + a dictionary containing the index, op name, and arrays with lists of the + indices and types for the inputs and outputs of the op/nodes. + """ + operand_types = [ + self._get_tensor_details(tensor_idx, subgraph_index=0)['dtype'] + for tensor_idx in self._interpreter.NodeInputs(op_index) + if tensor_idx != -1 + ] + result_types = [ + self._get_tensor_details(tensor_idx, subgraph_index=0)['dtype'] + for tensor_idx in self._interpreter.NodeOutputs(op_index) + if tensor_idx != -1 + ] + details = { + 'index': int(op_index), + 'op_name': self._interpreter.NodeName(op_index), + 'inputs': self._interpreter.NodeInputs(op_index), + 'outputs': self._interpreter.NodeOutputs(op_index), + 'operand_types': operand_types, + 'result_types': result_types, + } + return details + + def _get_tensor_details(self, tensor_index, subgraph_index): + """Gets tensor details. 
+ + Args: + tensor_index: Tensor index of tensor to query. + subgraph_index: Index of the subgraph. + + Returns: + A dictionary containing the following fields of the tensor: + 'name': The tensor name. + 'index': The tensor index in the subgraph. + 'shape': The shape of the tensor. + 'quantization': Deprecated, use 'quantization_parameters'. This field + only works for per-tensor quantization, whereas + 'quantization_parameters' work in all cases. + 'quantization_parameters': The parameters used to quantize the tensor: + 'scales': List of scales (one if per-tensor quantization) + 'zero_points': List of zero_points (one if per-tensor quantization) + 'quantized_dimension': Specifies the dimension of per-axis + quantization, in the case of multiple scales/zero_points. + + Raises: + ValueError: If tensor_index is invalid. + """ + tensor_index = int(tensor_index) + subgraph_index = int(subgraph_index) + tensor_name = self._interpreter.TensorName(tensor_index, subgraph_index) + tensor_size = self._interpreter.TensorSize(tensor_index, subgraph_index) + tensor_size_signature = self._interpreter.TensorSizeSignature( + tensor_index, subgraph_index) + tensor_type = self._interpreter.TensorType(tensor_index, subgraph_index) + tensor_quantization = self._interpreter.TensorQuantization( + tensor_index, subgraph_index) + tensor_quantization_params = self._interpreter.TensorQuantizationParameters( + tensor_index, subgraph_index) + tensor_sparsity_params = self._interpreter.TensorSparsityParameters( + tensor_index, subgraph_index) + + if not tensor_type: + raise ValueError('Could not get tensor details') + + details = { + 'name': tensor_name, + 'index': tensor_index, + 'shape': tensor_size, + 'shape_signature': tensor_size_signature, + 'dtype': tensor_type, + 'quantization': tensor_quantization, + 'quantization_parameters': { + 'scales': tensor_quantization_params[0], + 'zero_points': tensor_quantization_params[1], + 'quantized_dimension': tensor_quantization_params[2], + }, + 
'sparsity_parameters': tensor_sparsity_params, + } + + return details + + # Experimental and subject to change + def _get_ops_details(self): + """Gets op details for every node. + + Returns: + A list of dictionaries containing arrays with lists of tensor ids for + tensors involved in the op. + """ + return [ + self._get_op_details(idx) for idx in range(self._interpreter.NumNodes()) + ] + + def num_subgraphs(self): + """Returns the number of subgraphs in the model.""" + return self._interpreter.NumSubgraphs() + + def get_tensor_details(self, subgraph_index=0): + """Gets tensor details for every tensor with valid tensor details from a subgraph. + + Tensors where required information about the tensor is not found are not + added to the list. This includes temporary tensors without a name. + + Args: + subgraph_index: Index of the subgraph to fetch the tensor. + + Returns: + A list of dictionaries containing tensor information. + """ + tensor_details = [] + num_subgraphs = self._interpreter.NumSubgraphs() + if subgraph_index < 0 or subgraph_index >= num_subgraphs: + raise ValueError( + f'subgraph_index is out of range: {subgraph_index} for the model,' + f' which has {num_subgraphs} subgraphs.' + ) + + for idx in range(self._interpreter.NumTensors(subgraph_index)): + try: + tensor_details.append(self._get_tensor_details(idx, subgraph_index)) + except ValueError: + pass + + return tensor_details + + def get_input_details(self): + """Gets model input tensor details. + + Returns: + A list in which each item is a dictionary with details about + an input tensor. Each dictionary contains the following fields + that describe the tensor: + + + `name`: The tensor name. + + `index`: The tensor index in the interpreter. + + `shape`: The shape of the tensor. + + `shape_signature`: Same as `shape` for models with known/fixed shapes. + If any dimension sizes are unknown, they are indicated with `-1`. + + `dtype`: The numpy data type (such as `np.int32` or `np.uint8`). 
+ + `quantization`: Deprecated, use `quantization_parameters`. This field + only works for per-tensor quantization, whereas + `quantization_parameters` works in all cases. + + `quantization_parameters`: A dictionary of parameters used to quantize + the tensor: + ~ `scales`: List of scales (one if per-tensor quantization). + ~ `zero_points`: List of zero_points (one if per-tensor quantization). + ~ `quantized_dimension`: Specifies the dimension of per-axis + quantization, in the case of multiple scales/zero_points. + + `sparsity_parameters`: A dictionary of parameters used to encode a + sparse tensor. This is empty if the tensor is dense. + """ + return [ + self._get_tensor_details(i, subgraph_index=0) + for i in self._interpreter.InputIndices() + ] + + def set_tensor(self, tensor_index, value): + """Sets the value of the input tensor. + + Note this copies data in `value`. + + If you want to avoid copying, you can use the `tensor()` function to get a + numpy buffer pointing to the input buffer in the tflite interpreter. + + Args: + tensor_index: Tensor index of tensor to set. This value can be gotten from + the 'index' field in get_input_details. + value: Value of tensor to set. + + Raises: + ValueError: If the interpreter could not set the tensor. + """ + self._interpreter.SetTensor(tensor_index, value) + + def resize_tensor_input(self, input_index, tensor_size, strict=False): + """Resizes an input tensor. + + Args: + input_index: Tensor index of input to set. This value can be gotten from + the 'index' field in get_input_details. + tensor_size: The tensor_shape to resize the input to. + strict: Only unknown dimensions can be resized when `strict` is True. + Unknown dimensions are indicated as `-1` in the `shape_signature` + attribute of a given tensor. (default False) + + Raises: + ValueError: If the interpreter could not resize the input tensor. 
+ + Usage: + ``` + interpreter = Interpreter(model_content=tflite_model) + interpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3]) + interpreter.allocate_tensors() + interpreter.set_tensor(0, test_images) + interpreter.invoke() + ``` + """ + self._ensure_safe() + # `ResizeInputTensor` now only accepts int32 numpy array as `tensor_size + # parameter. + tensor_size = np.array(tensor_size, dtype=np.int32) + self._interpreter.ResizeInputTensor(input_index, tensor_size, strict) + + def get_output_details(self): + """Gets model output tensor details. + + Returns: + A list in which each item is a dictionary with details about + an output tensor. The dictionary contains the same fields as + described for `get_input_details()`. + """ + return [ + self._get_tensor_details(i, subgraph_index=0) + for i in self._interpreter.OutputIndices() + ] + + def get_signature_list(self): + """Gets the list of SignatureDefs in the model. + + Example, + ``` + signatures = interpreter.get_signature_list() + print(signatures) + + # { + # 'add': {'inputs': ['x', 'y'], 'outputs': ['output_0']} + # } + + Then using the names in the signature list you can get a callable from + get_signature_runner(). + ``` + + Returns: + A list of SignatureDef details in a dictionary structure. + It is keyed on the SignatureDef method name, and the value holds + a dictionary of inputs and outputs. + """ + full_signature_defs = self._interpreter.GetSignatureDefs() + for _, signature_def in full_signature_defs.items(): + signature_def['inputs'] = list(signature_def['inputs'].keys()) + signature_def['outputs'] = list(signature_def['outputs'].keys()) + return full_signature_defs + + def _get_full_signature_list(self): + """Gets list of SignatureDefs in the model. 
+ + Example, + ``` + signatures = interpreter._get_full_signature_list() + print(signatures) + + # { + # 'add': {'inputs': {'x': 1, 'y': 0}, 'outputs': {'output_0': 4}} + # } + + Then using the names in the signature list you can get a callable from + get_signature_runner(). + ``` + + Returns: + A list of SignatureDef details in a dictionary structure. + It is keyed on the SignatureDef method name, and the value holds + dictionary of inputs and outputs. + """ + return self._interpreter.GetSignatureDefs() + + def get_signature_runner(self, signature_key=None): + """Gets callable for inference of specific SignatureDef. + + Example usage, + ``` + interpreter = tf.lite.Interpreter(model_content=tflite_model) + interpreter.allocate_tensors() + fn = interpreter.get_signature_runner('div_with_remainder') + output = fn(x=np.array([3]), y=np.array([2])) + print(output) + # { + # 'quotient': array([1.], dtype=float32) + # 'remainder': array([1.], dtype=float32) + # } + ``` + + None can be passed for signature_key if the model has a single Signature + only. + + All names used are these specific SignatureDef names. + + + Args: + signature_key: Signature key for the SignatureDef, it can be None if and + only if the model has a single SignatureDef. The Default value is None. + + Returns: + This returns a callable that can run inference for SignatureDef defined + by argument 'signature_key'. + The callable will take key arguments corresponding to the arguments of the + SignatureDef, that should have numpy values. + The callable will return dictionary that maps from output names to numpy + values of the computed results. + + Raises: + ValueError: If passed signature_key is invalid. + """ + if signature_key is None: + if len(self._signature_defs) != 1: + raise ValueError( + 'SignatureDef signature_key is None and model has {0} Signatures. 
' + 'None is only allowed when the model has 1 SignatureDef'.format( + len(self._signature_defs))) + else: + signature_key = next(iter(self._signature_defs)) + return SignatureRunner(interpreter=self, signature_key=signature_key) + + def get_tensor(self, tensor_index, subgraph_index=0): + """Gets the value of the output tensor (get a copy). + + If you wish to avoid the copy, use `tensor()`. This function cannot be used + to read intermediate results. + + Args: + tensor_index: Tensor index of tensor to get. This value can be gotten from + the 'index' field in get_output_details. + subgraph_index: Index of the subgraph to fetch the tensor. Default value + is 0, which means to fetch from the primary subgraph. + + Returns: + a numpy array. + """ + return self._interpreter.GetTensor(tensor_index, subgraph_index) + + def tensor(self, tensor_index): + """Returns function that gives a numpy view of the current tensor buffer. + + This allows reading and writing to these tensors w/o copies. This more + closely mirrors the C++ Interpreter class interface's tensor() member, hence + the name. Be careful not to hold these output references through calls + to `allocate_tensors()` and `invoke()`. This function cannot be used to read + intermediate results. + + Usage: + + ``` + interpreter.allocate_tensors() + input = interpreter.tensor(interpreter.get_input_details()[0]["index"]) + output = interpreter.tensor(interpreter.get_output_details()[0]["index"]) + for i in range(10): + input().fill(3.) + interpreter.invoke() + print("inference %s" % output()) + ``` + + Notice how this function avoids making a numpy array directly. This is + because it is important to not hold actual numpy views to the data longer + than necessary. If you do, then the interpreter can no longer be invoked, + because it is possible the interpreter would resize and invalidate the + referenced tensors. The NumPy API doesn't allow any mutability of the + underlying buffers. 
+ + WRONG: + + ``` + input = interpreter.tensor(interpreter.get_input_details()[0]["index"])() + output = interpreter.tensor(interpreter.get_output_details()[0]["index"])() + interpreter.allocate_tensors() # This will throw RuntimeError + for i in range(10): + input.fill(3.) + interpreter.invoke() # this will throw RuntimeError since input, output + ``` + + Args: + tensor_index: Tensor index of tensor to get. This value can be gotten from + the 'index' field in get_output_details. + + Returns: + A function that can return a new numpy array pointing to the internal + TFLite tensor state at any point. It is safe to hold the function forever, + but it is not safe to hold the numpy array forever. + """ + return lambda: self._interpreter.tensor(self._interpreter, tensor_index) + + def invoke(self): + """Invoke the interpreter. + + Be sure to set the input sizes, allocate tensors and fill values before + calling this. Also, note that this function releases the GIL so heavy + computation can be done in the background while the Python interpreter + continues. No other function on this object should be called while the + invoke() call has not finished. + + Raises: + ValueError: When the underlying interpreter fails raise ValueError. + """ + self._ensure_safe() + self._interpreter.Invoke() + + def reset_all_variables(self): + return self._interpreter.ResetVariableTensors() + + # Experimental and subject to change. + def _native_handle(self): + """Returns a pointer to the underlying tflite::Interpreter instance. + + This allows extending tflite.Interpreter's functionality in a custom C++ + function. Consider how that may work in a custom pybind wrapper: + + m.def("SomeNewFeature", ([](py::object handle) { + auto* interpreter = + reinterpret_cast(handle.cast()); + ... + })) + + and corresponding Python call: + + SomeNewFeature(interpreter.native_handle()) + + Note: This approach is fragile. 
Users must guarantee the C++ extension build + is consistent with the tflite.Interpreter's underlying C++ build. + """ + return self._interpreter.interpreter() + + +class InterpreterWithCustomOps(Interpreter): + """Interpreter interface for TensorFlow Lite Models that accepts custom ops. + + The interface provided by this class is experimental and therefore not exposed + as part of the public API. + + Wraps the tf.lite.Interpreter class and adds the ability to load custom ops + by providing the names of functions that take a pointer to a BuiltinOpResolver + and add a custom op. + """ + + def __init__(self, custom_op_registerers=None, **kwargs): + """Constructor. + + Args: + custom_op_registerers: List of str (symbol names) or functions that take a + pointer to a MutableOpResolver and register a custom op. When passing + functions, use a pybind function that takes a uintptr_t that can be + recast as a pointer to a MutableOpResolver. + **kwargs: Additional arguments passed to Interpreter. + + Raises: + ValueError: If the interpreter was unable to create. 
+ """ + self._custom_op_registerers = custom_op_registerers or [] + super(InterpreterWithCustomOps, self).__init__(**kwargs) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2581c9dd3d91dfb894d3c38b311ffad469e88d2d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5a2099d01c9e5f1595ac3ebb749d88ac57b78f8a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.pyi @@ -0,0 +1,49 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class InterpreterWrapper: + def __init__(self, *args, **kwargs) -> None: ... + def AllocateTensors(self, subgraph_index: int = ...) -> object: ... + def GetSignatureDefs(self) -> object: ... + def GetSubgraphIndexFromSignature(self, arg0: str) -> object: ... + def GetTensor(self, tensor_index: int, subgraph_index: int = ...) -> object: ... + def InputIndices(self) -> object: ... + def Invoke(self, subgraph_index: int = ...) -> object: ... + def ModifyGraphWithDelegate(self, arg0: int) -> object: ... + def NodeInputs(self, arg0: int) -> object: ... + def NodeName(self, arg0: int) -> str: ... + def NodeOutputs(self, arg0: int) -> object: ... + def NumNodes(self) -> int: ... + def NumSubgraphs(self) -> int: ... + def NumTensors(self, arg0: int) -> int: ... + def OutputIndices(self) -> object: ... + def ResetVariableTensors(self) -> object: ... + def ResizeInputTensor(self, i: int, value: object, strict: bool, subgraph_index: int = ...) -> object: ... + def SetNumThreads(self, arg0: int) -> object: ... + def SetTensor(self, i: int, value: object, subgraph_index: int = ...) -> object: ... + def TensorName(self, arg0: int, arg1: int) -> str: ... + def TensorQuantization(self, arg0: int, arg1: int) -> object: ... + def TensorQuantizationParameters(self, arg0: int, arg1: int) -> object: ... + def TensorSize(self, arg0: int, arg1: int) -> object: ... + def TensorSizeSignature(self, arg0: int, arg1: int) -> object: ... 
+ def TensorSparsityParameters(self, arg0: int, arg1: int) -> object: ... + def TensorType(self, arg0: int, arg1: int) -> object: ... + def interpreter(self) -> int: ... + def tensor(self, base_object: object, tensor_index: int, subgraph_index: int = ...) -> object: ... + +def CreateWrapperFromBuffer(*args, **kwargs) -> Any: ... +def CreateWrapperFromFile(*args, **kwargs) -> Any: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so new file mode 100644 index 0000000000000000000000000000000000000000..bcbef0c9ae409368710f75daba6dd52e64ca189c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/interpreter_wrapper/_pywrap_tensorflow_interpreter_wrapper.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7d1b046049f4bba4c4a141965c6edd1fe25dcd343363ee528189a59b93a4d7b +size 6379712 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite.py new file mode 100644 index 0000000000000000000000000000000000000000..87cb2bdf8969f701bcf0d095fd3f0c1436029bdc --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite.py @@ -0,0 +1,3427 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TensorFlow Lite tooling helper functionality.""" + +import enum +import functools +import gc +import pprint +import shutil +import sys +import tempfile +import time +import warnings + +from absl import logging + +from google.protobuf import text_format as _text_format +from google.protobuf.message import DecodeError +from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc +from tensorflow.compiler.mlir.quantization.tensorflow.python import representative_dataset as rd +from tensorflow.core.framework import graph_pb2 as _graph_pb2 +from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import +from tensorflow.lite.profiling.proto import profiling_info_pb2 # pylint: disable=unused-import +from tensorflow.lite.python import conversion_metadata_schema_py_generated as conversion_metadata_fb +from tensorflow.lite.python import lite_constants as constants +from tensorflow.lite.python.convert import build_conversion_flags as _build_conversion_flags +from tensorflow.lite.python.convert import convert_graphdef as _convert_graphdef +from tensorflow.lite.python.convert import convert_graphdef_with_arrays as _convert_graphdef_with_arrays +from tensorflow.lite.python.convert import convert_jax_hlo as _convert_jax_hlo +from tensorflow.lite.python.convert import convert_saved_model as _convert_saved_model +from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import 
+from tensorflow.lite.python.convert import deduplicate_readonly_buffers as _deduplicate_readonly_buffers +from tensorflow.lite.python.convert import mlir_quantize as _mlir_quantize +from tensorflow.lite.python.convert import mlir_sparsify as _mlir_sparsify +from tensorflow.lite.python.convert import OpsSet +from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import +from tensorflow.lite.python.convert_phase import Component +from tensorflow.lite.python.convert_phase import convert_phase +from tensorflow.lite.python.convert_phase import SubComponent +from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model +from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import +from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import +from tensorflow.lite.python.interpreter import OpResolverType # pylint: disable=unused-import +from tensorflow.lite.python.metrics import metrics +from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import +from tensorflow.lite.python.op_hint import is_ophint_converted as _is_ophint_converted +from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import +from tensorflow.lite.python.optimize import calibrator as _calibrator +from tensorflow.lite.python.util import _jit +from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func +from tensorflow.lite.python.util import convert_debug_info_func as _convert_debug_info_func +from tensorflow.lite.python.util import freeze_graph as _freeze_graph +from tensorflow.lite.python.util import get_debug_info as _get_debug_info +from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config +from tensorflow.lite.python.util import get_model_hash as _get_model_hash +from tensorflow.lite.python.util import get_save_spec as _get_save_spec +from 
tensorflow.lite.python.util import get_sparsity_modes as _get_sparsity_modes +from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name +from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names +from tensorflow.lite.python.util import get_tf_type_name as _get_tf_type_name +from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph +from tensorflow.lite.python.util import model_input_signature as _model_input_signature +from tensorflow.lite.python.util import modify_model_io_type as _modify_model_io_type +from tensorflow.lite.python.util import populate_conversion_metadata as _populate_conversion_metadata +from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations +from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes +from tensorflow.lite.python.util import trace_model_call as _trace_model_call +from tensorflow.lite.tools import flatbuffer_utils +from tensorflow.lite.tools.optimize.debugging.python.debugger import QuantizationDebugger # pylint: disable=unused-import +from tensorflow.lite.tools.optimize.debugging.python.debugger import QuantizationDebugOptions # pylint: disable=unused-import +from tensorflow.python.client import session as _session +from tensorflow.python.eager import context +from tensorflow.python.eager import def_function as _def_function +from tensorflow.python.eager import function as _function +from tensorflow.python.framework import byte_swap_tensor as bst +from tensorflow.python.framework import convert_to_constants as _convert_to_constants +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.framework import versions +from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError +from tensorflow.python.framework.importer import import_graph_def as 
_import_graph_def +from tensorflow.python.platform import gfile +from tensorflow.python.saved_model import loader_impl as _loader_impl +from tensorflow.python.saved_model import save as _save +from tensorflow.python.saved_model import save_options as _save_options +from tensorflow.python.saved_model import signature_constants as _signature_constants +from tensorflow.python.saved_model import tag_constants as _tag_constants +from tensorflow.python.saved_model.load import load as _load +from tensorflow.python.saved_model.loader_impl import parse_saved_model_with_debug_info as _parse_saved_model_with_debug_info +from tensorflow.python.util import deprecation as _deprecation +from tensorflow.python.util import keras_deps +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export as _tf_export + + +@_tf_export("lite.Optimize") +class Optimize(enum.Enum): + """Enum defining the optimizations to apply when generating a tflite model. + + DEFAULT + The default optimization strategy that enables post-training quantization. + The type of post-training quantization that will be used is dependent on + the other converter options supplied. Refer to the + [documentation](/lite/performance/post_training_quantization) for further + information on the types available and how to use them. + + OPTIMIZE_FOR_SIZE + Deprecated. Does the same as DEFAULT. + + OPTIMIZE_FOR_LATENCY + Deprecated. Does the same as DEFAULT. + + EXPERIMENTAL_SPARSITY + Experimental flag, subject to change. + + Enable optimization by taking advantage of the sparse model weights + trained with pruning. + + The converter will inspect the sparsity pattern of the model weights and + do its best to improve size and latency. + The flag can be used alone to optimize float32 models with sparse weights. + It can also be used together with the DEFAULT optimization mode to + optimize quantized models with sparse weights. 
+ """ + + # Default optimization strategy that quantizes model weights. Enhanced + # optimizations are gained by providing a representative dataset that + # quantizes biases and activations as well. + # Converter will do its best to reduce size and latency, while minimizing + # the loss in accuracy. + DEFAULT = "DEFAULT" + + # Deprecated. Does the same as DEFAULT. + OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE" + + # Deprecated. Does the same as DEFAULT. + OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY" + + # Experimental flag, subject to change. + # Enable optimization by taking advantage of the sparse model weights trained + # with pruning. + # + # The converter will inspect the sparsity pattern of the model weights and do + # its best to improve size and latency. + # The flag can be used alone to optimize float32 models with sparse weights. + # It can also be used together with the DEFAULT optimization mode to optimize + # quantized models with sparse weights. + EXPERIMENTAL_SPARSITY = "EXPERIMENTAL_SPARSITY" + + def __str__(self): + return str(self.value) + + +# TODO(b/198099651): move converter implementation out of lite.py +@_tf_export("lite.RepresentativeDataset") +class RepresentativeDataset: + """Representative dataset used to optimize the model. + + This is a generator function that provides a small dataset to calibrate or + estimate the range, i.e, (min, max) of all floating-point arrays in the model + (such as model input, activation outputs of intermediate layers, and model + output) for quantization. Usually, this is a small subset of a few hundred + samples randomly chosen, in no particular order, from the training or + evaluation dataset. + """ + + def __init__(self, input_gen): + """Creates a representative dataset. + + Args: + input_gen: A generator function that generates input samples for the model + and has the same order, type and shape as the inputs to the model. 
+ Usually, this is a small subset of a few hundred samples randomly + chosen, in no particular order, from the training or evaluation dataset. + """ + self.input_gen = input_gen + + +@_tf_export("lite.TargetSpec") +class TargetSpec: + """Specification of target device used to optimize the model. + + Attributes: + supported_ops: Experimental flag, subject to change. Set of `tf.lite.OpsSet` + options, where each option represents a set of operators supported by the + target device. (default {tf.lite.OpsSet.TFLITE_BUILTINS})) + supported_types: Set of `tf.dtypes.DType` data types supported on the target + device. If initialized, optimization might be driven by the smallest type + in this set. (default set()) + experimental_select_user_tf_ops: Experimental flag, subject to change. Set + of user's TensorFlow operators' names that are required in the TensorFlow + Lite runtime. These ops will be exported as select TensorFlow ops in the + model (in conjunction with the tf.lite.OpsSet.SELECT_TF_OPS flag). This is + an advanced feature that should only be used if the client is using TF ops + that may not be linked in by default with the TF ops that are provided + when using the SELECT_TF_OPS path. The client is responsible for linking + these ops into the target runtime. + experimental_supported_backends: Experimental flag, subject to change. Set + containing names of supported backends. Currently only "GPU" is supported, + more options will be available later. 
+ """ + + def __init__( + self, + supported_ops=None, + supported_types=None, + experimental_select_user_tf_ops=None, + experimental_supported_backends=None, + ): + if supported_ops is None: + supported_ops = {OpsSet.TFLITE_BUILTINS} + self.supported_ops = supported_ops + if supported_types is None: + supported_types = set() + self.supported_types = supported_types + if experimental_select_user_tf_ops is None: + experimental_select_user_tf_ops = set() + self.experimental_select_user_tf_ops = experimental_select_user_tf_ops + self.experimental_supported_backends = experimental_supported_backends + self._experimental_custom_op_registerers = [] + # Hint for the supported accumulation type used for inference. Typically + # used for fp16 post-training quantization, where some models can use fp16 + # accumulators instead of the typical fp32 type. + self._experimental_supported_accumulation_type = None + + +class QuantizationMode: + """QuantizationMode determines the quantization type from user options.""" + + def __init__( + self, + optimizations, + target_spec, + representative_dataset, + graph_def, + disable_per_channel=False, + experimental_new_dynamic_range_quantizer=False, + experimental_low_bit_qat=False, + full_integer_quantization_bias_type=None, + experimental_mlir_variable_quantization=False, + ): + self._optimizations = optimizations + for deprecated_optimization in [ + Optimize.OPTIMIZE_FOR_SIZE, + Optimize.OPTIMIZE_FOR_LATENCY, + ]: + if deprecated_optimization in self._optimizations: + logging.warning( + ( + "Optimization option %s is deprecated, please use" + " optimizations=[Optimize.DEFAULT] instead." 
+ ), + deprecated_optimization, + ) + + self._target_spec = target_spec + self._representative_dataset = representative_dataset + self._graph_def = graph_def + if self._is_int8_target_required(): + self._validate_int8_required() + + self.enable_mlir_variable_quantization = ( + experimental_mlir_variable_quantization + ) + if self._is_float16_target_required(): + self._validate_float16_required() + self._disable_per_channel = disable_per_channel + + self._enable_new_dynamic_range_quantizer = ( + experimental_new_dynamic_range_quantizer + ) + # Allow training with lower than 8 bit weights to be converted + # to constants with trained scale. + self._experimental_low_bit_qat = experimental_low_bit_qat + + self._full_integer_quantization_bias_type = ( + full_integer_quantization_bias_type + ) + self._validate_full_integer_quantization_bias_type() + + def is_post_training_int8_only_quantization(self): + return ( + self.is_any_optimization_enabled() + and self._representative_dataset is not None + and not self._is_int16x8_target_required() + and not self.is_allow_float() + and self._is_int8_target_required() + ) + + def is_post_training_int8_quantization_with_float_fallback(self): + return ( + self.is_any_optimization_enabled() + and self._representative_dataset is not None + and not self._is_int16x8_target_required() + and self.is_allow_float() + and self._smallest_supported_type() == _dtypes.int8 + ) + + def is_post_training_int8_quantization(self): + return ( + self.is_post_training_int8_only_quantization() + or self.is_post_training_int8_quantization_with_float_fallback() + ) + + def is_post_training_int16x8_only_quantization(self): + return ( + self.is_any_optimization_enabled() + and self._representative_dataset is not None + and self._is_int16x8_target_required() + and not self.is_allow_float() + ) + + def is_post_training_int16x8_quantization_with_float_fallback(self): + return ( + self.is_any_optimization_enabled() + and self._representative_dataset is not None + 
and self._is_int16x8_target_required() + and self.is_allow_float() + ) + + def is_post_training_int16x8_quantization(self): + return ( + self.is_post_training_int16x8_only_quantization() + or self.is_post_training_int16x8_quantization_with_float_fallback() + ) + + def is_post_training_integer_quantization(self): + return ( + self.is_post_training_int8_quantization() + or self.is_post_training_int16x8_quantization() + ) + + def is_low_bit_quantize_aware_training(self): + return ( + self.is_any_optimization_enabled() + and self.is_quantization_aware_trained_model() + and self._experimental_low_bit_qat + ) + + def is_quantization_aware_training(self): + return ( + self.is_any_optimization_enabled() + and self.is_quantization_aware_trained_model() + and not self.is_low_bit_quantize_aware_training() + ) + + def is_integer_quantization(self): + return ( + self.is_post_training_integer_quantization() + or self.is_quantization_aware_training() + or self.is_low_bit_quantize_aware_training() + ) + + def is_post_training_dynamic_range_quantization(self): + # Post-training dynamic range quantization is only enabled if post-training + # int8 quantization and training time quantization was not done. 
+ return ( + self.is_any_optimization_enabled() + and self._representative_dataset is None + and not self.is_quantization_aware_trained_model() + and self._smallest_supported_type() == _dtypes.int8 + ) + + def is_post_training_float16_quantization(self): + return ( + self.is_any_optimization_enabled() + and self._smallest_supported_type().size == 2 + and _dtypes.float16 in self._target_spec.supported_types + ) + + def is_bfloat16_quantization(self): + return ( + self.is_any_optimization_enabled() + and self._smallest_supported_type().size == 2 + and _dtypes.bfloat16 in self._target_spec.supported_types + ) + + def activations_type(self): + if self.is_integer_quantization(): + if self._is_int16x8_target_required(): + return _dtypes.int16 + else: + return _dtypes.int8 + else: + return _dtypes.float32 + + def bias_type(self): + if self._full_integer_quantization_bias_type: + return self._full_integer_quantization_bias_type + + if self.activations_type() == _dtypes.int16: + return _dtypes.int64 + elif self.activations_type() == _dtypes.int8: + return _dtypes.int32 + else: + return _dtypes.float32 + + def converter_flags(self, inference_ty=None, inference_input_ty=None): + """Flags to the converter.""" + + if self.is_integer_quantization(): + is_low_bit_qat = self.is_low_bit_quantize_aware_training() + return { + "inference_type": ( + inference_ty + if inference_ty is not None + else self.activations_type() + ), + "inference_input_type": _dtypes.float32, + "post_training_quantize": False, # disable dynamic range quantization + "quantize_to_float16": False, # disable float16 quantization + "disable_infer_tensor_range": is_low_bit_qat, + "use_fake_quant_num_bits": is_low_bit_qat, + "enable_mlir_variable_quantization": ( + self.enable_mlir_variable_quantization + ), + } + elif self.is_post_training_dynamic_range_quantization(): + return { + "inference_type": _dtypes.float32, + "inference_input_type": _dtypes.float32, + "post_training_quantize": True, # enable dynamic range 
quantization + "quantize_to_float16": False, # disable float16 quantization + # experimental: disable per-channel (per-axis) quantization. + "disable_per_channel_quantization": self._disable_per_channel, + "enable_mlir_dynamic_range_quantizer": ( + self._enable_new_dynamic_range_quantizer + ), + "enable_mlir_variable_quantization": ( + self.enable_mlir_variable_quantization + ), + } + elif self.is_post_training_float16_quantization(): + return { + "inference_type": _dtypes.float32, + "inference_input_type": _dtypes.float32, + "post_training_quantize": True, + "quantize_to_float16": True, # enable float16 quantization + # pylint: disable=protected-access + "accumulation_type": ( + self._target_spec._experimental_supported_accumulation_type + ), + # pylint: enable=protected-access + "allow_bfloat16": self.is_bfloat16_quantization(), + "enable_mlir_dynamic_range_quantizer": ( + self._enable_new_dynamic_range_quantizer + ), + "enable_mlir_variable_quantization": ( + self.enable_mlir_variable_quantization + ), + } + else: + # Note this might still trigger (uint8) quantization to be compatible with + # the old converter. + return { + "inference_type": ( + inference_ty if inference_ty is not None else _dtypes.float32 + ), + "inference_input_type": inference_input_ty, + "post_training_quantize": False, # enable dynamic range quantization + "quantize_to_float16": False, # disable float16 quantization + "allow_bfloat16": self.is_bfloat16_quantization(), + } + + # Below are helpers for the above functions. + + def _validate_int8_required(self): + """Int8 mode requires certain parameters to exist and be compatible.""" + # Validate target_spec attibute. 
+ if set(self._target_spec.supported_ops) == { + OpsSet.TFLITE_BUILTINS_INT8 + } and not ( + set(self._target_spec.supported_types) == set() + or set(self._target_spec.supported_types) == {_dtypes.int8} + ): + raise ValueError( + "As full integer quantization has been enabled by setting " + "`target_spec.supported_ops`={tf.lite.OpsSet.TFLITE_BUILTINS_INT8}, " + "thus `target_spec.supported_types` should be left uninitizalized " + "or set to {tf.int8}." + ) + if set(self._target_spec.supported_types) == {_dtypes.int8}: + self._target_spec.supported_ops = {OpsSet.TFLITE_BUILTINS_INT8} + + # Check if representative_dataset is specified. + if ( + not self._representative_dataset + and not self.is_quantization_aware_training() + ): + raise ValueError( + "For full integer quantization, a " + "`representative_dataset` must be specified." + ) + + # Update represenative dataset to the expected format. + if self._representative_dataset: + if not isinstance(self._representative_dataset, RepresentativeDataset): + self._representative_dataset = RepresentativeDataset( + self._representative_dataset + ) + + def _validate_float16_required(self): + """Float16 mode requires certain parameters to exist and be compatible.""" + if self.enable_mlir_variable_quantization: + raise ValueError( + "`_experimental_variable_quantization` is only supported for full" + " integer quantization." + ) + + def _validate_full_integer_quantization_bias_type(self): + """Validates bias type for full interger quantization.""" + bias_type = self._full_integer_quantization_bias_type + if not bias_type: + return + + if self.activations_type() == _dtypes.float32: + raise ValueError( + "`full_integer_quantization_bias_type` is only supported for full" + " integer quantization." + ) + + if self.activations_type() == _dtypes.int8 and bias_type != _dtypes.int32: + raise ValueError( + "Expected bias type to be `dtypes.int32` for Int8Quant. 
" + f"Current setting bias type: {bias_type}" + ) + + if ( + self.activations_type() == _dtypes.int16 + and bias_type != _dtypes.int32 + and bias_type != _dtypes.int64 + ): + raise ValueError( + "Expected bias type to be `dtypes.int32` or `dtypes.int64` for " + f"Int16Quant. Current setting bias type: {bias_type}" + ) + + def _is_int8_target_required(self): + return ( + OpsSet.TFLITE_BUILTINS_INT8 in set(self._target_spec.supported_ops) + ) or (set(self._target_spec.supported_types) == set([_dtypes.int8])) + + def _is_int16x8_target_required(self): + return ( + OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 + in set(self._target_spec.supported_ops) + ) + + def is_allow_float(self): + return (OpsSet.TFLITE_BUILTINS in set(self._target_spec.supported_ops)) or ( + OpsSet.SELECT_TF_OPS in set(self._target_spec.supported_ops) + ) + + def _is_float16_target_required(self): + return _dtypes.float16 in self._target_spec.supported_types + + def is_any_optimization_enabled(self): + return bool( + set(self._optimizations).intersection([ + Optimize.OPTIMIZE_FOR_LATENCY, + Optimize.OPTIMIZE_FOR_SIZE, + Optimize.DEFAULT, + ]) + ) + + def _smallest_supported_type(self): + if self._target_spec.supported_types: + return min(self._target_spec.supported_types, key=lambda x: x.size) + else: + # The default smallest supported type is INT8. 
+ return _dtypes.int8 + + def is_quantization_aware_trained_model(self): + """Checks if the graph contains any training-time quantization ops.""" + training_quant_ops = frozenset({ + "FakeQuantWithMinMaxVars", + "FakeQuantWithMinMaxVarsPerChannel", + "FakeQuantWithMinMaxArgs", + "QuantizeAndDequantizeV2", + "QuantizeAndDequantizeV3", + }) + + if self._graph_def: + for node_def in self._graph_def.node: + if node_def.op in training_quant_ops: + return True + for function in self._graph_def.library.function: + for node_def in function.node_def: + if node_def.op in training_quant_ops: + return True + return False + + +class TFLiteConverterBase: + """Converter superclass to share functionality between V1 and V2 converters.""" + + # Stores the original model type temporarily to transmit the information + # from the factory class methods to TFLiteConverterBase init function. + _original_model_type = conversion_metadata_fb.ModelType.NONE + + def __init__(self): + self.optimizations = set() + self.representative_dataset = None + self.target_spec = TargetSpec() + self.allow_custom_ops = False + self.experimental_new_converter = True + self.experimental_new_quantizer = True + self.experimental_enable_resource_variables = True + self._experimental_calibrate_only = False + self._experimental_sparsify_model = False + self._experimental_disable_per_channel = False + self._debug_info = None # contains the stack traces of all the original + # nodes in the `GraphDef` to the converter. 
+ self.saved_model_dir = None + self._saved_model_tags = None + self._saved_model_version = 0 + self._saved_model_exported_names = [] + self._tflite_metrics = metrics.TFLiteConverterMetrics() + self._collected_converter_params = {} + self.unfold_batchmatmul = False + self.legalize_custom_tensor_list_ops = False + self._experimental_lower_tensor_list_ops = True + self._experimental_default_to_single_batch_in_tensor_list_ops = False + self._experimental_unfold_large_splat_constant = False + self._experimental_tf_quantization_mode = None + # If unset, bias:int32 is by default except 16x8 quant. + # For 16x8 quant, bias:int64 is used to prevent any overflow by default. + # The accumulator type will be the same as bias type set by + # full_integer_quantization_bias_type. + self._experimental_full_integer_quantization_bias_type = None + # Provides specs for quantization, whether preset or custom. + self._experimental_quantization_options = None # Deprecated + # Whether to use StableHLO Quantizer instead of TFLite Quantizer. + self.experimental_use_stablehlo_quantizer = False + # Quantization configuration to pass to StableHLO Quantizer. + self.experimental_stablehlo_quantizer_config = None + # Initializes conversion metadata. + self.exclude_conversion_metadata = False + self._metadata = conversion_metadata_fb.ConversionMetadataT() + self._metadata.environment = conversion_metadata_fb.EnvironmentT() + self._metadata.options = conversion_metadata_fb.ConversionOptionsT() + self._metadata.environment.tensorflowVersion = versions.__version__ + self._metadata.environment.modelType = self._get_original_model_type() + self._experimental_enable_dynamic_update_slice = False + self._experimental_preserve_assert_op = False + self._experimental_guarantee_all_funcs_one_use = False + + # When the value is true, the MLIR quantantizer triggers dynamic range + # quantization in MLIR instead of the old quantizer. Used only if + # experimental_new_quantizer is on. 
+ self.experimental_new_dynamic_range_quantizer = True + # Experimental flag to enable low-bit QAT in 8 bit. + self._experimental_low_bit_qat = False + # Experimental flag to add all TF ops (including custom TF ops) to the + # converted model as flex ops. + self._experimental_allow_all_select_tf_ops = False + + self._experimental_variable_quantization = False + self._experimental_disable_fuse_mul_and_fc = False + self._experimental_use_buffer_offset = False + self._experimental_reduce_type_precision = False + self._experimental_qdq_conversion_mode = None + self._experimental_disable_per_channel_quantization_for_dense_layers = False + self._experimental_enable_composite_direct_lowering = False + self.model_origin_framework = constants.UNSET + self.canonicalizing_inf_as_min_max_float = True + + # Debug parameters + self.ir_dump_dir = None + self.ir_dump_pass_regex = None + self.ir_dump_func_regex = None + self.enable_timing = None + self.print_ir_before = None + self.print_ir_after = None + self.print_ir_module_scope = None + self.elide_elementsattrs_if_larger = None + + def _grappler_config(self, optimizers=None): + """Creates a tf.compat.v1.ConfigProto for configuring Grappler. + + Args: + optimizers: List of strings that represents the list of optimizers. + + Returns: + tf.ConfigProto. + """ + if not optimizers: + optimizers = [] + # MLIR converter will take care of constant folding instead of grappler. + if not self.experimental_new_converter: + optimizers.append("constfold") + + is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == set( + self.target_spec.supported_ops + ) + if is_only_flex_enabled: + # The layout optimizer turns NHCW to NCHW. This provides performance + # optimizations when Flex mode is enabled. However, this is not compatible + # with builtin ops. 
+ optimizers.append("layout") + return _get_grappler_config(optimizers) + + def _quantize( + self, + result, + input_type, + output_type, + activations_type, + bias_type, + allow_float, + enable_variable_quantization, + debug_options, + ): + """Quantize the model.""" + # pylint: disable=protected-access + custom_op_registerers_by_name = [ + x + for x in self.target_spec._experimental_custom_op_registerers + if isinstance(x, str) + ] + custom_op_registerers_by_func = [ + x + for x in self.target_spec._experimental_custom_op_registerers + if not isinstance(x, str) + ] + # pylint: enable=protected-access + if not isinstance(self.representative_dataset, RepresentativeDataset): + self.representative_dataset = RepresentativeDataset( + self.representative_dataset + ) + + # Add intermediate tensors to the model if needed. + result = _calibrator.add_intermediate_tensors(result) + calibrate_quantize = _calibrator.Calibrator( + result, custom_op_registerers_by_name, custom_op_registerers_by_func + ) + if self._experimental_calibrate_only or self.experimental_new_quantizer: + calibrated = calibrate_quantize.calibrate( + self.representative_dataset.input_gen + ) + + if self._experimental_calibrate_only: + return calibrated + elif self.experimental_new_quantizer and ( + activations_type != _dtypes.int16 + ): + return _mlir_quantize( + calibrated, + self._experimental_disable_per_channel, + input_data_type=input_type, + output_data_type=output_type, + enable_variable_quantization=enable_variable_quantization, + disable_per_channel_for_dense_layers=self._experimental_disable_per_channel_quantization_for_dense_layers, + debug_options_str=debug_options.SerializeToString(), + ) + else: + return calibrate_quantize.calibrate_and_quantize( + self.representative_dataset.input_gen, + input_type, + output_type, + allow_float, + activations_type, + bias_type, + disable_per_channel=self._experimental_disable_per_channel, + ) + + def _is_unknown_shapes_allowed(self): + # Unknown dimensions 
are only allowed with the new converter. + return self.experimental_new_converter + + def _get_base_converter_args(self): + """Returns the base converter args. + + Returns: + {key str: val} + """ + args = { + "input_format": constants.TENSORFLOW_GRAPHDEF, + "allow_custom_ops": self.allow_custom_ops, + "debug_info": self._debug_info, + "target_ops": self.target_spec.supported_ops, + "enable_mlir_converter": self.experimental_new_converter, + "select_user_tf_ops": self.target_spec.experimental_select_user_tf_ops, + "supported_backends": self.target_spec.experimental_supported_backends, + "unfold_batchmatmul": self.unfold_batchmatmul, + "legalize_custom_tensor_list_ops": self.legalize_custom_tensor_list_ops, + "lower_tensor_list_ops": self._experimental_lower_tensor_list_ops, + "unfold_large_splat_constant": ( + self._experimental_unfold_large_splat_constant + ), + "default_to_single_batch_in_tensor_list_ops": ( + self._experimental_default_to_single_batch_in_tensor_list_ops + ), + "tf_quantization_mode": self._experimental_tf_quantization_mode, + "experimental_enable_resource_variables": ( + self.experimental_enable_resource_variables + ), + "enable_dynamic_update_slice": ( + self._experimental_enable_dynamic_update_slice + ), + "preserve_assert_op": self._experimental_preserve_assert_op, + "guarantee_all_funcs_one_use": ( + self._experimental_guarantee_all_funcs_one_use + ), + "allow_all_select_tf_ops": self._experimental_allow_all_select_tf_ops, + "disable_fuse_mul_and_fc": self._experimental_disable_fuse_mul_and_fc, + "quantization_options": self._experimental_quantization_options, + "ir_dump_dir": self.ir_dump_dir, + "ir_dump_pass_regex": self.ir_dump_pass_regex, + "ir_dump_func_regex": self.ir_dump_func_regex, + "enable_timing": self.enable_timing, + "print_ir_before": self.print_ir_before, + "print_ir_after": self.print_ir_after, + "print_ir_module_scope": self.print_ir_module_scope, + "elide_elementsattrs_if_larger": self.elide_elementsattrs_if_larger, + 
"use_buffer_offset": self._experimental_use_buffer_offset, + "reduce_type_precision": self._experimental_reduce_type_precision, + "use_stablehlo_quantizer": self.experimental_use_stablehlo_quantizer, + "stablehlo_quantizer_config": ( + self.experimental_stablehlo_quantizer_config + ), + "qdq_conversion_mode": self._experimental_qdq_conversion_mode, + "disable_per_channel_quantization_for_dense_layers": ( + self._experimental_disable_per_channel_quantization_for_dense_layers + ), + "enable_composite_direct_lowering": ( + self._experimental_enable_composite_direct_lowering + ), + "model_origin_framework": self.model_origin_framework, + "canonicalizing_inf_as_min_max_float": ( + self.canonicalizing_inf_as_min_max_float + ), + } + + if self.saved_model_dir: + args.update({ + "saved_model_dir": self.saved_model_dir, + "saved_model_version": self._saved_model_version, + "saved_model_tags": self._saved_model_tags, + "saved_model_exported_names": self._saved_model_exported_names, + }) + + if self._experimental_quantization_options: + logging.warning( + "Configs from custom methods in experimental_quantization_options" + " may not produce a valid tflite model. Note that currently this" + " option only supports StableHLO path. Setting this option in TFLite" + " path will be a no-op." + ) + + if self.experimental_use_stablehlo_quantizer: + self._assign_stablehlo_quantization_config_or_populate_default(args) + elif self.experimental_stablehlo_quantizer_config is not None: + raise ValueError( + "QuantizationConfig should be provided only when" + " experimental_use_stablehlo_quantizer is set to true." + ) + + return args + + def _assign_stablehlo_quantization_config_or_populate_default(self, args): + """Assigns `QuantizationConfig` to `args` or populate default. + + Args: + args: Dictionary of argument names and associated values. 
+ """ + if ( + self.experimental_stablehlo_quantizer_config is not None + and Optimize.DEFAULT not in self.optimizations + ): + args["quantization_config"] = self.experimental_stablehlo_quantizer_config + elif Optimize.DEFAULT in self.optimizations and self.representative_dataset: + if len(self._saved_model_exported_names) != 1: + raise ValueError( + "StableHLO quantizer is only supported when converting from a" + " SavedModel with one signature key." + ) + + signature_key = self._saved_model_exported_names[0] + + # Convert a programmatically provided representative dataset to a + # temporary TFRecord file to be used by the StableHLO quantizer. + tfrecord_file_path = tempfile.mkstemp( + suffix=".tfrecord", prefix=signature_key + )[1] + rd.TfRecordRepresentativeDatasetSaver( + {signature_key: tfrecord_file_path} + ).save({signature_key: self.representative_dataset()}) + + quantization_config = qc.QuantizationConfig( + static_range_ptq_preset=qc.StaticRangePtqPreset( + representative_datasets=[ + qc.RepresentativeDatasetConfig( + tf_record=qc.TfRecordFile(path=tfrecord_file_path) + ) + ], + enable_per_channel_quantized_weight=True, + enable_full_int_quantization=True, + ), + # For ODML use cases, uniform quantized types should be left intact. + pipeline_config=qc.PipelineConfig( + unpack_quantized_types=False, + ), + ) + + args["quantization_config"] = quantization_config + # TODO: b/307626463 - Enable StableHLO quantizer DRQ when Optimize.DEFAULT + # is set without representative dataset. + else: + raise ValueError( + "StableHLO quantizer only supports static-range and weight-only PTQ." 
+ ) + + def _contains_function_with_implements_attr(self, saved_model_proto): + meta_graph = saved_model_proto.meta_graphs[0] + for function in meta_graph.graph_def.library.function: + if function.attr.get("_implements", None) or function.attr.get( + "api_implements", None + ): + return True + return False + + def _parse_saved_model_args(self, always_enable_saved_model_import=False): + """Parses SavedModel arguments from the given Keras/RNN SavedModel. + + Args: + always_enable_saved_model_import: Bool. When the value is true, it enables + MLIR saved model import path regardless of checking the conditions. + """ + if not self.experimental_new_converter: + self.saved_model_dir = None + return + if self.saved_model_dir: + try: + saved_model_proto, _ = _parse_saved_model_with_debug_info( + self.saved_model_dir + ) + except OSError: + # If it fails to read the given saved model, it will fall back to the + # frozen graph def path. + self.saved_model_dir = None + return + if ( + not always_enable_saved_model_import + and not self._contains_function_with_implements_attr( + saved_model_proto + ) + ): + self.saved_model_dir = None + return + + if not self._saved_model_exported_names: + self._saved_model_exported_names = [] + self._saved_model_version = saved_model_proto.saved_model_schema_version + if self._saved_model_version == 0: + self.saved_model_dir = None + logging.warning("SavedModel schema version is zero.") + return + if self._saved_model_version not in [1, 2]: + raise ValueError( + "SavedModel file format({0}) is not supported".format( + self._saved_model_version + ) + ) + + def _sparsify_model(self): + return Optimize.EXPERIMENTAL_SPARSITY in self.optimizations + + def _increase_conversion_attempt_metric(self): + self._tflite_metrics.increase_counter_converter_attempt() + + def _increase_conversion_success_metric(self): + self._tflite_metrics.increase_counter_converter_success() + + @classmethod + def _set_original_model_type(cls, model_type): + """Stores the 
original model type.""" + if model_type == conversion_metadata_fb.ModelType.NONE: + raise ValueError("The original model type should be specified.") + cls._original_model_type = model_type + + def _get_original_model_type(self): + """One-time getter to return original model type and set it to NONE.""" + model_type = TFLiteConverterBase._original_model_type + TFLiteConverterBase._original_model_type = ( + conversion_metadata_fb.ModelType.NONE + ) + return model_type + + def _save_conversion_params_metric( + self, graph_def=None, inference_type=None, inference_input_type=None + ): + """Set conversion parameter metrics.""" + converter_kwargs = self._collected_converter_params + converter_kwargs.update(self._get_base_converter_args()) + + # Optimization parameters. + quant_mode = QuantizationMode( + self.optimizations, + self.target_spec, + self.representative_dataset, + graph_def, + self._experimental_disable_per_channel, + self.experimental_new_dynamic_range_quantizer, + self._experimental_low_bit_qat, + self._experimental_full_integer_quantization_bias_type, + self._experimental_variable_quantization, + ) + converter_kwargs.update({ + "tf_version": self._metadata.environment.tensorflowVersion, + "api_version": self._metadata.environment.apiVersion, + "original_model_format": self._metadata.environment.modelType, + "optimization_default": quant_mode.is_any_optimization_enabled(), + "optimization_post_training_dynamic_range": ( + quant_mode.is_post_training_dynamic_range_quantization() + ), + "optimization_post_training_float16": ( + quant_mode.is_post_training_float16_quantization() + ), + "optimization_post_training_integer_quantize": ( + quant_mode.is_post_training_integer_quantization() + ), + "optimization_qat": quant_mode.is_quantization_aware_training(), + "optimization_low_bit_qat": ( + quant_mode.is_low_bit_quantize_aware_training() + ), + "optimization_sparsify": self._sparsify_model(), + "activations_type": quant_mode.activations_type(), + }) + 
converter_kwargs.update( + quant_mode.converter_flags(inference_type, inference_input_type) + ) + + # pylint: disable=protected-access + if self.target_spec._experimental_supported_accumulation_type: + converter_kwargs.update({ + "accumulation_type": ( + self.target_spec._experimental_supported_accumulation_type + ) + }) + # pylint: enable=protected-access + + def format_element(elem): + if isinstance(elem, enum.Enum): + return str(elem.value) + return pprint.pformat(elem) + + def format_param(param): + if isinstance(param, (list, tuple, set)): + if not param: + return "None" # Return None if empty. + string_list = [format_element(x) for x in param] + return ",".join(sorted(string_list)) + return format_element(param) + + for key, value in converter_kwargs.items(): + self._tflite_metrics.set_converter_param(key, format_param(value)) + self._tflite_metrics.set_export_required() + + # Set conversion option metadata. + self._metadata.options.allowCustomOps = self.allow_custom_ops + self._metadata.options.enableSelectTfOps = ( + OpsSet.SELECT_TF_OPS in self.target_spec.supported_ops + ) + self._metadata.options.forceSelectTfOps = set( + [OpsSet.SELECT_TF_OPS] + ) == set(self.target_spec.supported_ops) + self._metadata.options.modelOptimizationModes = [] + + if quant_mode.is_post_training_float16_quantization(): + self._metadata.options.modelOptimizationModes.append( + conversion_metadata_fb.ModelOptimizationMode.PTQ_FLOAT16 + ) + + if quant_mode.is_post_training_dynamic_range_quantization(): + self._metadata.options.modelOptimizationModes.append( + conversion_metadata_fb.ModelOptimizationMode.PTQ_DYNAMIC_RANGE + ) + + if quant_mode.is_post_training_int8_quantization(): + self._metadata.options.modelOptimizationModes.append( + conversion_metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER + ) + + if quant_mode.is_post_training_int16x8_quantization(): + self._metadata.options.modelOptimizationModes.append( + conversion_metadata_fb.ModelOptimizationMode.PTQ_INT16 + ) + + 
if quant_mode.is_quantization_aware_training(): + self._metadata.options.modelOptimizationModes.append( + conversion_metadata_fb.ModelOptimizationMode.QUANTIZATION_AWARE_TRAINING + ) + + def _set_conversion_latency_metric(self, value): + self._tflite_metrics.set_converter_latency(value) + + @convert_phase(Component.OPTIMIZE_TFLITE_MODEL) + def _optimize_tflite_model( + self, model, quant_mode, debug_options, quant_io=True + ): + """Apply optimizations on a TFLite model.""" + + # Disable TFLite quantization pass when + # `experimental_use_stablehlo_quantizer` is set to `True`. StableHLO + # Quantizer performs quantization during the conversion step, which happens + # before `_optimize_tflite_model`. + if ( + quant_mode.is_integer_quantization() + and not self.experimental_use_stablehlo_quantizer + ): + in_type, out_type = self.inference_input_type, self.inference_output_type + + if quant_mode.is_post_training_integer_quantization(): + q_in_type = in_type if in_type and quant_io else _dtypes.float32 + q_out_type = out_type if out_type and quant_io else _dtypes.float32 + q_activations_type = quant_mode.activations_type() + q_bias_type = quant_mode.bias_type() + q_allow_float = quant_mode.is_allow_float() + q_variable_quantization = quant_mode.enable_mlir_variable_quantization + model = self._quantize( + model, + q_in_type, + q_out_type, + q_activations_type, + q_bias_type, + q_allow_float, + q_variable_quantization, + debug_options, + ) + + m_in_type = in_type if in_type else _dtypes.float32 + m_out_type = out_type if out_type else _dtypes.float32 + # Skip updating model io types if MLIR quantizer already takes care of it + if not ( + quant_mode.is_post_training_integer_quantization() + and self.experimental_new_quantizer + and quant_io + and (m_in_type in [_dtypes.int8, _dtypes.uint8, _dtypes.float32]) + and (m_out_type in [_dtypes.int8, _dtypes.uint8, _dtypes.float32]) + ): + model = _modify_model_io_type(model, m_in_type, m_out_type) + + if self._sparsify_model(): 
+ model = _mlir_sparsify(model) + + if not self._experimental_use_buffer_offset: + try: + model_object = flatbuffer_utils.convert_bytearray_to_object(model) + if _check_model_use_buffer_offset(model_object): + return model + model = _deduplicate_readonly_buffers(model) + except Exception: # pylint: disable=broad-except + # Skip buffer deduplication when flatbuffer library is not ready to be + # utilized. + logging.warning( + "Buffer deduplication procedure will be skipped when flatbuffer " + "library is not properly loaded" + ) + + return model + + def _convert_and_export_metrics(self, convert_func, *args, **kwargs): + """Wraps around convert function to export metrics. + + Args: + convert_func: The convert function to wrap. + *args: Positional arguments of the convert function. + **kwargs: The keyword arguments of the convert function. + + Returns: + The decorator to wrap the convert function. + """ + self._increase_conversion_attempt_metric() + self._save_conversion_params_metric() + start_time = time.process_time() + result = convert_func(self, *args, **kwargs) + elapsed_time_ms = (time.process_time() - start_time) * 1000 + if result: + self._increase_conversion_success_metric() + self._set_conversion_latency_metric(round(elapsed_time_ms)) + self._tflite_metrics.export_metrics() + if self.exclude_conversion_metadata or self._experimental_use_buffer_offset: + return result + # TODO(b/286886803): add support for adding user metadata with + # use_buffer_offset flags + model_object = flatbuffer_utils.convert_bytearray_to_object(result) + if _check_model_use_buffer_offset(model_object): + return result + # Populates the conversion metadata. + # TODO(b/202090541): Collects sparsity block size information. 
+ sparsity_modes = _get_sparsity_modes(model_object) + model_hash = _get_model_hash(model_object) + self._metadata.options.modelOptimizationModes.extend(sparsity_modes) + self._metadata.environment.modelHash = model_hash + model_object = _populate_conversion_metadata(model_object, self._metadata) + return flatbuffer_utils.convert_object_to_bytearray(model_object) + + +def _check_model_use_buffer_offset(model_object): + """Checks if a model object uses buffer offsets to store constant buffers. + + Args: + model_object: tflite model, a python object + + Returns: + True of the model_object has the metadata entry "buffer_location" + False otherwise + """ + if not model_object.metadata: + return False + for meta in model_object.metadata: + if meta.name.decode("utf-8") == "buffer_location": + return True + + return False + + +def _export_metrics(convert_func): + """The decorator around convert function to export metrics.""" + + @functools.wraps(convert_func) + def wrapper(self, *args, **kwargs): + # pylint: disable=protected-access + return self._convert_and_export_metrics(convert_func, *args, **kwargs) + # pylint: enable=protected-access + + return wrapper + + +class TFLiteConverterBaseV2(TFLiteConverterBase): + """Converter subclass to share functionality between V2 converters.""" + + def __init__(self): + """Constructor for TFLiteConverter.""" + super(TFLiteConverterBaseV2, self).__init__() + self.inference_input_type = _dtypes.float32 + self.inference_output_type = _dtypes.float32 + self._metadata.environment.apiVersion = 2 + + def _validate_inference_input_output_types(self, quant_mode): + """Validate inference_input_type and inference_output_type flags.""" + default_types = [_dtypes.float32] + # We support integer input/output for integer quantized models only. 
+ if quant_mode.is_integer_quantization(): + if quant_mode.is_post_training_int16x8_quantization(): + all_types = default_types + [_dtypes.int16] + else: + all_types = default_types + [_dtypes.int8, _dtypes.uint8, _dtypes.int16] + if ( + self.inference_input_type not in all_types + or self.inference_output_type not in all_types + ): + all_types_names = ["tf." + t.name for t in all_types] + raise ValueError( + "The inference_input_type and inference_output_type " + "must be in {}.".format(all_types_names) + ) + elif ( + self.inference_input_type not in default_types + or self.inference_output_type not in default_types + ): + raise ValueError( + "The inference_input_type and inference_output_type " + "must be tf.float32." + ) + + @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.LOAD_SAVED_MODEL) + def _load_saved_model(self, saved_model_dir, saved_model_tags): + """Load graph_def from saved model with the default serving signature key. + + Args: + saved_model_dir: Directory of the SavedModel. + saved_model_tags: Set of tags identifying the MetaGraphDef within the + SavedModel to analyze. + + Returns: + graph_def: The loaded GraphDef. + input_tensors: List of input tensors. + output_tensors: List of output tensors. 
+ """ + graph = _ops.Graph() + saved_model = _loader_impl.SavedModelLoader(saved_model_dir) + saved_model.load_graph(graph, tags=saved_model_tags) + meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags) + graph_def = meta_graph.graph_def + signature_def = meta_graph.signature_def[ + _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY + ] + input_tensors = [ + graph.get_tensor_by_name(signature_def.inputs[key].name) + for key in signature_def.inputs + ] + output_tensors = [ + graph.get_tensor_by_name(signature_def.outputs[key].name) + for key in signature_def.outputs + ] + return graph_def, input_tensors, output_tensors + + @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS) + def _validate_inputs(self, graph_def, input_tensors): + """Validate the input parameters. + + Args: + graph_def: The TensorFlow GraphDef. + input_tensors: List of input tensors. + + Raise: + ValueError: Input shape is not specified. Invalid quantization parameters. + """ + # Update conversion params with graph_def. + self._save_conversion_params_metric(graph_def) + self._quant_mode = QuantizationMode( + self.optimizations, + self.target_spec, + self.representative_dataset, + graph_def, + self._experimental_disable_per_channel, + self.experimental_new_dynamic_range_quantizer, + self._experimental_low_bit_qat, + self._experimental_full_integer_quantization_bias_type, + self._experimental_variable_quantization, + ) + self._validate_inference_input_output_types(self._quant_mode) + + if not self._is_unknown_shapes_allowed(): + # Checks dimensions in input tensor. + for tensor in input_tensors: + # Note that shape_list might be empty for scalar shapes. + shape_list = tensor.shape.as_list() + if None in shape_list[1:]: + raise ValueError( + "None is only supported in the 1st dimension. 
Tensor '{0}' has " + "invalid shape '{1}'.".format( + _get_tensor_name(tensor), shape_list + ) + ) + elif shape_list and shape_list[0] is None: + # Set the batch size to 1 if undefined. + shape = tensor.shape.as_list() + shape[0] = 1 + tensor.set_shape(shape) + + if self._trackable_obj is None or not hasattr( + self._trackable_obj, "graph_debug_info" + ): + self._debug_info = _get_debug_info( + _build_debug_info_func(self._funcs[0].graph), graph_def + ) + else: + self._debug_info = _get_debug_info( + _convert_debug_info_func(self._trackable_obj.graph_debug_info), + graph_def, + ) + + @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.OPTIMIZE_TF_MODEL) + def _optimize_tf_model( + self, graph_def, input_tensors, output_tensors, frozen_func + ): + """Run a Grappler pass to optimize the TensorFlow graph. + + Args: + graph_def: Frozen GraphDef to be optimized. + input_tensors: List of input tensors. + output_tensors: List of output tensors. + frozen_func: TensorFlow Graph. + + Returns: + The optimized TensorFlow graph. + """ + grappler_config = self._grappler_config() + # Skip running grappler when there are no optimizers to run. If not, + # grappler will run with the default optimizer set and it will lead to + # causing an unexpected behavior. + if grappler_config.graph_options.rewrite_options.optimizers: + graph_def = _run_graph_optimizations( + graph_def, + input_tensors, + output_tensors, + config=grappler_config, + graph=frozen_func.graph, + ) + return graph_def + + def _convert_from_saved_model(self, graph_def): + """Helper method that converts saved model. + + Args: + graph_def: GraphDef object for the model, used only for stats. + + Returns: + The converted TFLite model. + """ + # Update conversion params with graph_def. + self._save_conversion_params_metric(graph_def) + # Get quantization options and do some sanity checks. 
+ quant_mode = QuantizationMode( + self.optimizations, + self.target_spec, + self.representative_dataset, + graph_def, + self._experimental_disable_per_channel, + self.experimental_new_dynamic_range_quantizer, + self._experimental_low_bit_qat, + self._experimental_full_integer_quantization_bias_type, + self._experimental_variable_quantization, + ) + self._validate_inference_input_output_types(quant_mode) + converter_kwargs = { + "enable_tflite_resource_variables": ( + self.experimental_enable_resource_variables + ) + } + converter_kwargs.update(self._get_base_converter_args()) + converter_kwargs.update(quant_mode.converter_flags()) + + result = _convert_saved_model(**converter_kwargs) + return self._optimize_tflite_model( + result, + quant_mode, + _build_conversion_flags(**converter_kwargs).debug_options, + quant_io=self.experimental_new_quantizer, + ) + + def convert(self, graph_def, input_tensors, output_tensors): + """Converts a TensorFlow GraphDef based on instance variables. + + Args: + graph_def: Frozen TensorFlow GraphDef. + input_tensors: List of input tensors. + output_tensors: List of output tensors. + + Returns: + The converted data in serialized format. + + Raises: + ValueError: + No concrete function is specified. + Multiple concrete functions are specified. + Input shape is not specified. + Invalid quantization parameters. + """ + self._validate_inputs(graph_def, input_tensors) + converter_kwargs = self._get_base_converter_args() + converter_kwargs.update(self._quant_mode.converter_flags()) + if not self.experimental_new_converter: + logging.warning( + "Please consider switching to the new converter by setting " + "experimental_new_converter=True. " + "The old converter is deprecated." + ) + else: + logging.info( + "Using new converter: If you encounter a problem " + "please file a bug. You can opt-out " + "by setting experimental_new_converter=False" + ) + + # Converts model. 
+ result = _convert_graphdef( + input_data=graph_def, + input_tensors=input_tensors, + output_tensors=output_tensors, + **converter_kwargs, + ) + + return self._optimize_tflite_model( + result, + self._quant_mode, + _build_conversion_flags(**converter_kwargs).debug_options, + quant_io=self.experimental_new_quantizer, + ) + + +class TFLiteSavedModelConverterV2(TFLiteConverterBaseV2): + """Converts the given SavedModel into TensorFlow Lite model. + + Attributes: + saved_model_dir: Directory of the SavedModel. + """ + + def __init__( + self, + saved_model_dir, + saved_model_tags=None, + saved_model_exported_names=None, + trackable_obj=None, + ): + """Constructor for TFLiteConverter. + + Args: + saved_model_dir: Directory of the SavedModel. + saved_model_tags: Set of tags identifying the MetaGraphDef within the + SavedModel to analyze. All tags in the tag set must be present. (default + {tf.saved_model.SERVING}). + saved_model_exported_names: Names to be exported when the saved model + import path is on. + trackable_obj: tf.AutoTrackable object associated with `funcs`. A + reference to this object needs to be maintained so that Variables do not + get garbage collected since functions have a weak reference to + Variables. This is only required when the tf.AutoTrackable object is not + maintained by the user (e.g. `from_saved_model`). + """ + super(TFLiteSavedModelConverterV2, self).__init__() + self.saved_model_dir = saved_model_dir + self._saved_model_tags = saved_model_tags + self._saved_model_exported_names = saved_model_exported_names + self._trackable_obj = trackable_obj + self._parse_saved_model_args(always_enable_saved_model_import=True) + + @_export_metrics + def convert(self): + """Converts a TensorFlow GraphDef based on instance variables. + + Returns: + The converted data in serialized format. + + Raises: + ValueError: + No concrete function is specified. + Multiple concrete functions are specified. + Input shape is not specified. 
+ Invalid quantization parameters. + """ + graph_def, input_tensors, output_tensors = self._load_saved_model( + self.saved_model_dir, self._saved_model_tags + ) + # If we can't use saved model importer, then fallback + # to frozen graph conversion path. + if self.saved_model_dir is None or not self.experimental_new_converter: + graph_def, _, _, _ = _freeze_saved_model( + self.saved_model_dir, + None, + None, + None, + self._saved_model_tags, + _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, + ) + # We make sure to clear the saved_model_dir as there is some + # legacy code down in the caller that checks this. + self.saved_model_dir = None + return super(TFLiteSavedModelConverterV2, self).convert( + graph_def, input_tensors, output_tensors + ) + + trackable_obj = _load(self.saved_model_dir, self._saved_model_tags) + if trackable_obj is None: + self._debug_info = _get_debug_info( + _build_debug_info_func(self._funcs[0].graph), graph_def + ) + else: + self._debug_info = _get_debug_info( + _convert_debug_info_func(trackable_obj.graph_debug_info), + graph_def, + ) + + del trackable_obj + gc.collect() + return self._convert_from_saved_model(graph_def) + + +class TFLiteKerasModelConverterV2(TFLiteConverterBaseV2): + """Converts the given Keras model into TensorFlow Lite model.""" + + def __init__(self, keras_model, trackable_obj=None): + """Constructor for TFLiteConverter. + + Args: + keras_model: tf.Keras.Model. + trackable_obj: tf.AutoTrackable object associated with `funcs`. A + reference to this object needs to be maintained so that Variables do not + get garbage collected since functions have a weak reference to + Variables. This is only required when the tf.AutoTrackable object is not + maintained by the user (e.g. `from_saved_model`). 
+ """ + super(TFLiteKerasModelConverterV2, self).__init__() + self._keras_model = keras_model + self._trackable_obj = trackable_obj + self.experimental_lower_to_saved_model = True + + @convert_phase( + Component.PREPARE_TF_MODEL, SubComponent.CONVERT_KERAS_TO_SAVED_MODEL + ) + def _convert_keras_to_saved_model(self, output_dir): + """Save Keras model to the SavedModel format. + + Args: + output_dir: The output directory to save the SavedModel. + + Returns: + graph_def: The frozen GraphDef. + input_tensors: List of input tensors. + output_tensors: List of output tensors. + """ + try: + + def _is_keras_3(): + """Returns true if _keras_model is a Keras 3+ model.""" + try: + import keras # pylint: disable=g-import-not-at-top + + return keras.__version__.startswith("3") and isinstance( + self._keras_model, keras.layers.Layer + ) + except ImportError: + return False + + if _is_keras_3(): + import keras # pylint: disable=g-import-not-at-top + + # Keras 3 model `export` by default saves model.__call__ with + # training=True. Need to export the model call with training=False for + # inference only and TFLite conversion. + export_archive = keras.export.ExportArchive() + export_archive.track(self._keras_model) + if isinstance( + self._keras_model, + (keras.src.models.Functional, keras.src.models.Sequential), + ): + input_signature = nest.map_structure( + lambda x: tensor_spec.TensorSpec( + x.shape, dtype=x.dtype, name=x.name + ), + self._keras_model.inputs, + ) + if isinstance(input_signature, list) and len(input_signature) > 1: + input_signature = [input_signature] + else: + save_spec = _get_save_spec(self._keras_model) + if not save_spec: + raise ValueError( + "The model provided has never been called. " + "It must be called at least once before export." 
            )
          input_signature = [save_spec]
        # Export the inference-only call (training=False) as the "serve"
        # endpoint, matching what TFLite conversion expects.
        inference_fn = functools.partial(
            self._keras_model.__call__, training=False
        )
        export_archive.add_endpoint("serve", inference_fn, input_signature)
        export_archive.write_out(output_dir)
      else:
        _save.save(
            self._keras_model,
            output_dir,
            options=_save_options.SaveOptions(save_debug_info=True),
        )
    except Exception:  # pylint: disable=broad-except
      # When storing the given keras model to a saved model is failed, let's
      # use original keras model conversion pipeline.
      return None, None, None
    self.saved_model_dir = output_dir
    self._saved_model_tags = set([_tag_constants.SERVING])
    self._saved_model_exported_names = [
        _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    ]
    # May clear self.saved_model_dir again if the SavedModel import path is
    # not usable for this model.
    self._parse_saved_model_args(
        always_enable_saved_model_import=self.experimental_lower_to_saved_model
    )
    if self.saved_model_dir:
      graph_def, input_tensors, output_tensors = self._load_saved_model(
          self.saved_model_dir, self._saved_model_tags
      )
      self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
      return graph_def, input_tensors, output_tensors
    return None, None, None

  @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL)
  def _freeze_keras_model(self):
    """Freeze Keras model to frozen graph.

    Returns:
      graph_def: The frozen GraphDef.
      input_tensors: List of input tensors.
      output_tensors: List of output tensors.
      frozen_func: The frozen ConcreteFunction.
    """
    input_signature = None
    # If the model's call is not a `tf.function`, then we need to first get its
    # input signature from `model_input_signature` method. We can't directly
    # call `trace_model_call` because otherwise the batch dimension is set
    # to None.
    # Once we have better support for dynamic shapes, we can remove this.
    if not isinstance(self._keras_model.call, _def_function.Function):
      # Pass `keep_original_batch_size=True` will ensure that we get an input
      # signature including the batch dimension specified by the user.
      # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
      input_signature = _model_input_signature(
          self._keras_model, keep_original_batch_size=True
      )

    # TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
    func = _trace_model_call(self._keras_model, input_signature)
    concrete_func = func.get_concrete_function()
    self._funcs = [concrete_func]

    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False
        )
    )

    # Resource-typed inputs (captured variables) are not model inputs.
    input_tensors = [
        tensor
        for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs
    return graph_def, input_tensors, output_tensors, frozen_func

  def _convert_as_saved_model(self):
    """Converts a Keras model as a saved model.

    Returns:
      The converted data in serialized format, or None (implicitly) when the
      SavedModel export path could not be used for this model.
    """
    temp_dir = tempfile.mkdtemp()
    try:
      graph_def, input_tensors, output_tensors = (
          self._convert_keras_to_saved_model(temp_dir)
      )
      if self.saved_model_dir:
        return super(TFLiteKerasModelConverterV2, self).convert(
            graph_def, input_tensors, output_tensors
        )
    finally:
      # Best-effort cleanup of the temporary SavedModel (ignore_errors=True).
      shutil.rmtree(temp_dir, True)

  @_export_metrics
  def convert(self):
    """Converts a keras model based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # Prefer the SavedModel path; fall back to freezing the traced call.
    saved_model_convert_result = self._convert_as_saved_model()
    if saved_model_convert_result:
      return saved_model_convert_result

    graph_def, input_tensors, output_tensors, frozen_func = (
        self._freeze_keras_model()
    )

    graph_def = self._optimize_tf_model(
        graph_def, input_tensors, output_tensors, frozen_func
    )

    return super(TFLiteKerasModelConverterV2, self).convert(
        graph_def, input_tensors, output_tensors
    )


class TFLiteFrozenGraphConverterV2(TFLiteConverterBaseV2):
  """Converts the given frozen graph into TensorFlow Lite model."""

  def __init__(self, funcs, trackable_obj=None):
    """Constructor for TFLiteConverter.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements.
      trackable_obj: tf.AutoTrackable object associated with `funcs`. A
        reference to this object needs to be maintained so that Variables do not
        get garbage collected since functions have a weak reference to
        Variables. This is only required when the tf.AutoTrackable object is not
        maintained by the user (e.g. `from_saved_model`).
    """
    super(TFLiteFrozenGraphConverterV2, self).__init__()
    self._funcs = funcs
    self._trackable_obj = trackable_obj
    self.experimental_lower_to_saved_model = True

  @convert_phase(
      Component.PREPARE_TF_MODEL, SubComponent.FREEZE_CONCRETE_FUNCTION
  )
  def _freeze_concrete_function(self):
    """Convert the given ConcreteFunction to frozen graph.

    Returns:
      graph_def: The frozen GraphDef.
      input_tensors: List of input tensors.
      output_tensors: List of output tensors.
      frozen_func: The frozen ConcreteFunction.

    Raises:
      ValueError: none or multiple ConcreteFunctions provided.
    """
    if len(self._funcs) == 0:  # pylint: disable=g-explicit-length-test
      raise ValueError("No ConcreteFunction is specified.")

    if len(self._funcs) > 1:
      raise ValueError(
          "This converter can only convert a single "
          "ConcreteFunction. Converting multiple functions is "
          "under development."
      )

    frozen_func, graph_def = (
        _convert_to_constants.convert_variables_to_constants_v2_as_graph(
            self._funcs[0], lower_control_flow=False
        )
    )

    # Resource-typed inputs (captured variables) are not model inputs.
    input_tensors = [
        tensor
        for tensor in frozen_func.inputs
        if tensor.dtype != _dtypes.resource
    ]
    output_tensors = frozen_func.outputs
    return graph_def, input_tensors, output_tensors, frozen_func

  @convert_phase(
      Component.PREPARE_TF_MODEL,
      SubComponent.CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL,
  )
  def _convert_concrete_functions_to_saved_model(self, output_dir):
    """Save concrete functions to the SavedModel format.

    Args:
      output_dir: The output directory to save the SavedModel.

    Returns:
      graph_def: The frozen GraphDef.
      input_tensors: List of input tensors.
      output_tensors: List of output tensors.
    """
    if len(self._funcs) == 0:  # pylint: disable=g-explicit-length-test
      raise ValueError("No ConcreteFunction is specified.")

    if not self.experimental_lower_to_saved_model:
      return None, None, None

    # Without the provided trackable obj, it is not able to serialize the given
    # concrete functions as a saved model format. Also when trackable obj is
    # a function, use the original concrete function conversion pipeline.
    if not self._trackable_obj or isinstance(
        self._trackable_obj,
        (_function.ConcreteFunction, _def_function.Function),
    ):
      return None, None, None

    signatures = {}
    signature_keys = []
    try:
      # A single function is exported under the default serving signature;
      # multiple functions are keyed by their graph names.
      if len(self._funcs) == 1:
        signatures[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
            self._funcs[0]
        )
        signature_keys = [
            _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        ]
      else:
        for func in self._funcs:
          signatures[func.graph.name] = func
          signature_keys.append(func.graph.name)

      _save.save(
          self._trackable_obj,
          output_dir,
          signatures=signatures,
          options=_save_options.SaveOptions(save_debug_info=True),
      )
    except Exception:  # pylint: disable=broad-except
      # When storing the given concrete function to a saved model is failed,
      # let's use original concrete function conversion pipeline.
      return None, None, None

    self.saved_model_dir = output_dir
    self._saved_model_tags = set([_tag_constants.SERVING])
    self._saved_model_exported_names = signature_keys
    self._parse_saved_model_args(always_enable_saved_model_import=True)
    if self.saved_model_dir:
      graph_def, input_tensors, output_tensors = self._load_saved_model(
          self.saved_model_dir, self._saved_model_tags
      )
      self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
      return graph_def, input_tensors, output_tensors
    return None, None, None

  def _convert_as_saved_model(self):
    """Converts the given concrete functions as a saved model format.

    Returns:
      The converted data in serialized format, or None when the SavedModel
      path could not be used.
    """
    temp_dir = tempfile.mkdtemp()
    try:
      graph_def, input_tensors, _ = (
          self._convert_concrete_functions_to_saved_model(temp_dir)
      )
      if self.saved_model_dir:
        self._validate_inputs(graph_def, input_tensors)
        return self._convert_from_saved_model(graph_def)
    finally:
      # Best-effort cleanup of the temporary SavedModel (ignore_errors=True).
      shutil.rmtree(temp_dir, True)
    return None

  @_export_metrics
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete function is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    # Prefer the SavedModel path; fall back to freezing the ConcreteFunction.
    if self.experimental_lower_to_saved_model:
      saved_model_convert_result = self._convert_as_saved_model()
      if saved_model_convert_result:
        return saved_model_convert_result

    graph_def, input_tensors, output_tensors, frozen_func = (
        self._freeze_concrete_function()
    )

    graph_def = self._optimize_tf_model(
        graph_def, input_tensors, output_tensors, frozen_func
    )

    return super(TFLiteFrozenGraphConverterV2, self).convert(
        graph_def, input_tensors, output_tensors
    )


class TFLiteJaxConverterV2(TFLiteConverterBaseV2):
  """Converts the given jax model into TensorFlow Lite model."""

  def __init__(self, serving_funcs, inputs):
    """Constructor for TFLiteConverter.

    Args:
      serving_funcs: A list functions of the serving func of the jax module, the
        model params should already be inlined. (e.g., `serving_func =
        functools.partial(model, params=params)`)
      inputs: Array of input tensor placeholders tuple,s like `jnp.zeros`. For
        example, wrapped in an array like "[('input1', input1), ('input2',
        input2)]]".

    Jax functions are polymorphic, for example:

    ```python
    def add(a, b):
      return a + b
    ```

    Will yield different computations if different input signatures are passed
    in: Pass `add(10.0, 20.0)` will yield a scalar `add` while pass
    `add(np.random((100, 1)), np.random(100, 100))` will yield a broadcasting
    add. We will need the input information to do tracing for the converter
    to properly convert the model. So it's important to pass in the desired
    `input placeholders` with the correct input shape/type.

    In the converted tflite model, the function name will be default to "main",
    the output names will be the traced outputs. The output ordering shall
    match the serving function.
    """  # fmt: skip

    super(TFLiteJaxConverterV2, self).__init__()
    self._serving_funcs = serving_funcs
    self._inputs = inputs

  @_export_metrics
  def convert(self):
    """Converts a Jax serving func based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ImportError:
        If cannot import the jit from jax.
      ValueError:
        No serving function is specified.
        Input tensors are not specified.
        The truth value of an array with more than one element is ambiguous.
        Failed to convert the given Jax function to hlo.
    """
    # `_jit` is None when jax is not installed (import guarded elsewhere).
    if not _jit:
      raise ImportError("Cannot import jit from jax.")

    if not self._serving_funcs:
      raise ValueError("No serving func is specified.")

    if not self._inputs:
      raise ValueError("Input tensors are not specified.")

    if len(self._inputs) != len(self._serving_funcs):
      msg = (
          "Input tensor mapping len {} does not match serving func len {}."
          .format(len(self._inputs), len(self._serving_funcs))
      )
      raise ValueError(msg)

    if not isinstance(self._inputs, (tuple, list)):
      raise ValueError(
          "Input tensors should be pass in a tuple list wrapped in an array."
      )

    # TODO(b/197690428): Support multiple functions.
    # Currently only support one serving function.
+ if len(self._serving_funcs) > 1: + raise ValueError("Currently only support single serving function.") + + if not isinstance(self._inputs[0], (tuple, list)): + raise ValueError("The input placeholders are not a dictionary.") + + input_names = [] + ordered_inputs = [] + for input_name, tensor in self._inputs[0]: + input_names.append(input_name) + ordered_inputs.append(tensor) + + try: + hlo_proto = ( + _jit(self._serving_funcs[0]) + .trace(*ordered_inputs) + .lower(lowering_platforms=("cpu",)) + .compiler_ir("hlo") + .as_serialized_hlo_module_proto() + ) + except Exception: # pylint: disable=broad-except + raise ValueError("Failed to convert the given Jax function to hlo.") + + # We need to set the hlo proto, and here we use serialized proto format + # since it's more compact. + converter_kwargs = { + "input_content": hlo_proto, + "input_names": input_names, + "is_proto_format": True, + } + converter_kwargs.update(self._get_base_converter_args()) + + # Get quantization options and do some checks. + quant_mode = QuantizationMode( + self.optimizations, self.target_spec, self.representative_dataset, None + ) + self._validate_inference_input_output_types(quant_mode) + converter_kwargs.update(quant_mode.converter_flags()) + result = _convert_jax_hlo(**converter_kwargs) + + return self._optimize_tflite_model( + result, + quant_mode, + _build_conversion_flags(**converter_kwargs).debug_options, + quant_io=self.experimental_new_quantizer, + ) + + +@_tf_export("lite.TFLiteConverter", v1=[]) +class TFLiteConverterV2(TFLiteFrozenGraphConverterV2): + """Converts a TensorFlow model into TensorFlow Lite model. + + Attributes: + optimizations: Experimental flag, subject to change. Set of optimizations to + apply. e.g {tf.lite.Optimize.DEFAULT}. 
(default None, must be None or a + set of values of type `tf.lite.Optimize`) + representative_dataset: A generator function used for integer quantization + where each generated sample has the same order, type and shape as the + inputs to the model. Usually, this is a small subset of a few hundred + samples randomly chosen, in no particular order, from the training or + evaluation dataset. This is an optional attribute, but required for full + integer quantization, i.e, if `tf.int8` is the only supported type in + `target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`. + (default None) + target_spec: Experimental flag, subject to change. Specifications of the + target device, including supported ops set, supported types and a set + of user's defined TensorFlow operators required in the TensorFlow Lite + runtime. Refer to `tf.lite.TargetSpec`. + inference_input_type: Data type of the input layer. Note that integer types + (tf.int8 and tf.uint8) are currently only supported for post-training + integer quantization and quantization-aware training. (default tf.float32, + must be in {tf.float32, tf.int8, tf.uint8}) + inference_output_type: Data type of the output layer. Note that integer + types (tf.int8 and tf.uint8) are currently only supported for + post-training integer quantization and quantization-aware training. + (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) + allow_custom_ops: Boolean indicating whether to allow custom operations. + When False, any unknown operation is an error. When True, custom ops are + created for any op that is unknown. The developer needs to provide these + to the TensorFlow Lite runtime with a custom resolver. (default False) + exclude_conversion_metadata: Whether not to embed the conversion metadata + into the converted model. (default False) + experimental_new_converter: Experimental flag, subject to change. Enables + MLIR-based conversion. 
(default True) + experimental_new_quantizer: Experimental flag, subject to change. Enables + MLIR-based quantization conversion instead of Flatbuffer-based conversion. + (default True) + experimental_enable_resource_variables: Experimental flag, subject to + change. Enables [resource + variables](https://tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables) + to be converted by this converter. This is only allowed if the + from_saved_model interface is used. (default True) + + Example usage: + + ```python + # Converting a SavedModel to a TensorFlow Lite model. + converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) + tflite_model = converter.convert() + + # Converting a tf.Keras model to a TensorFlow Lite model. + converter = tf.lite.TFLiteConverter.from_keras_model(model) + tflite_model = converter.convert() + + # Converting ConcreteFunctions to a TensorFlow Lite model. + converter = tf.lite.TFLiteConverter.from_concrete_functions([func], model) + tflite_model = converter.convert() + + # Converting a Jax model to a TensorFlow Lite model. + converter = tf.lite.TFLiteConverter.experimental_from_jax( + [func], [[ ('input1', input1), ('input2', input2)]]) + tflite_model = converter.convert() + ``` + """ # fmt: skip + + # pylint: disable=useless-super-delegation + def __init__(self, funcs, trackable_obj=None): + """Constructor for TFLiteConverter. + + Args: + funcs: List of TensorFlow ConcreteFunctions. The list should not contain + duplicate elements. + trackable_obj: tf.AutoTrackable object associated with `funcs`. A + reference to this object needs to be maintained so that Variables do not + get garbage collected since functions have a weak reference to + Variables. This is only required when the tf.AutoTrackable object is not + maintained by the user (e.g. `from_saved_model`). 
    """
    super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)

  @classmethod
  def from_concrete_functions(cls, funcs, trackable_obj=None):
    """Creates a TFLiteConverter object from ConcreteFunctions.

    Args:
      funcs: List of TensorFlow ConcreteFunctions. The list should not contain
        duplicate elements. Currently converter can only convert a single
        ConcreteFunction. Converting multiple functions is under development.
      trackable_obj: An `AutoTrackable` object (typically `tf.module`)
        associated with `funcs`. A reference to this object needs to be
        maintained so that Variables do not get garbage collected since
        functions have a weak reference to Variables.

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid input type.
    """
    # pylint: disable=protected-access
    TFLiteConverterBase._set_original_model_type(
        conversion_metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS
    )
    # pylint: enable=protected-access
    if trackable_obj is None:
      logging.warning(
          "Please consider providing the trackable_obj argument in the "
          "from_concrete_functions. Providing without the trackable_obj "
          "argument is deprecated and it will use the deprecated conversion "
          "path."
      )
    for func in funcs:
      if not isinstance(func, _function.ConcreteFunction):
        message = "This function takes in a list of ConcreteFunction."
        if isinstance(func, _def_function.Function):
          message += (
              " To get the ConcreteFunction from a Function,"
              " call get_concrete_function."
          )
        raise ValueError(message)
    return cls(funcs, trackable_obj)

  @classmethod
  def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
    """Creates a TFLiteConverter object from a SavedModel directory.

    Args:
      saved_model_dir: SavedModel directory to convert.
      signature_keys: List of keys identifying SignatureDef containing inputs
        and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphdef is used. (default
        saved_model.signatures)
      tags: Set of tags identifying the MetaGraphDef within the SavedModel to
        analyze. All tags in the tag set must be present. (default
        {tf.saved_model.SERVING} or {'serve'})

    Returns:
      TFLiteConverter object.

    Raises:
      Invalid signature keys.
    """
    # pylint: disable=protected-access
    TFLiteConverterBase._set_original_model_type(
        conversion_metadata_fb.ModelType.TF_SAVED_MODEL
    )
    # pylint: enable=protected-access
    # When run without eager enabled, this will return the legacy
    # TFLiteConverter.
    if not context.executing_eagerly():
      signature_key = None
      if signature_keys:
        if len(signature_keys) != 1:
          raise ValueError("Only support a single signature key.")
        else:
          signature_key = signature_keys[0]
      logging.warning(
          "Invoking the TF1 implementation of TFLiteConverter "
          "because eager is disabled. Consider enabling eager."
      )
      return TFLiteConverter.from_saved_model(
          saved_model_dir, signature_key=signature_key, tag_set=tags
      )

    if tags is None:
      tags = set([_tag_constants.SERVING])

    with context.eager_mode():
      saved_model = _load(saved_model_dir, tags)
    if not signature_keys:
      signature_keys = list(saved_model.signatures.keys())

    if not signature_keys:
      raise ValueError("Only support at least one signature key.")

    # Distinguishes SavedModel artifacts created by `model.export`
    # from SavedModel created by `model.save`/`tf.saved_model.save`.
    if (
        len(signature_keys) > 1
        and hasattr(saved_model, "serve")  # `model.export` default endpoint
        and not hasattr(saved_model, "_default_save_signature")
        # `_default_save_signature` does not exist for `model.export` artifacts.
    ):
      # Default `serve` endpoint for `model.export` should be copied
      # to `serving_default` to prevent issues in TF Lite serving.
      saved_model.serving_default = saved_model.serve
      delattr(saved_model, "serve")
      signature_keys = ["serving_default"]

    funcs = []
    for key in signature_keys:
      if key not in saved_model.signatures:
        raise ValueError(
            "Invalid signature key '{}' found. Valid keys are '{}'.".format(
                key, ",".join(saved_model.signatures)
            )
        )
      funcs.append(saved_model.signatures[key])

    # Prefer the dedicated SavedModel converter when its import path is
    # usable; otherwise fall back to the ConcreteFunction-based converter.
    saved_model_converter = TFLiteSavedModelConverterV2(
        saved_model_dir, tags, signature_keys
    )
    if saved_model_converter.saved_model_dir:
      return saved_model_converter

    return cls(funcs, saved_model)

  @classmethod
  def from_keras_model(cls, model):
    """Creates a TFLiteConverter object from a Keras model.

    Args:
      model: tf.Keras.Model

    Returns:
      TFLiteConverter object.
    """
    # pylint: disable=protected-access
    TFLiteConverterBase._set_original_model_type(
        conversion_metadata_fb.ModelType.KERAS_MODEL
    )
    # pylint: enable=protected-access
    return TFLiteKerasModelConverterV2(model)

  @classmethod
  @_deprecation.deprecated(
      None,
      "Use `jax2tf.convert` and (`lite.TFLiteConverter.from_saved_model`"
      " or `lite.TFLiteConverter.from_concrete_functions`) instead.",
  )
  def experimental_from_jax(cls, serving_funcs, inputs):
    # Experimental API, subject to changes.
    # TODO(b/197690428): Currently only supports single function.
    """Creates a TFLiteConverter object from a Jax model with its inputs.

    Args:
      serving_funcs: An array of Jax functions with all the weights applied
        already.
      inputs: An array of Jax input placeholders tuples list, e.g.,
        jnp.zeros(INPUT_SHAPE). Each tuple list should correspond with the
        serving function.

    Returns:
      TFLiteConverter object.
    """
    # pylint: disable=protected-access
    TFLiteConverterBase._set_original_model_type(
        conversion_metadata_fb.ModelType.JAX
    )
    # pylint: enable=protected-access
    return TFLiteJaxConverterV2(serving_funcs, inputs)

  # pylint: disable=useless-super-delegation
  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format.

    Raises:
      ValueError:
        No concrete function is specified.
        Multiple concrete functions are specified.
        Input shape is not specified.
        Invalid quantization parameters.
    """
    return super(TFLiteConverterV2, self).convert()


class TFLiteConverterBaseV1(TFLiteConverterBase):
  """Converter subclass to share functionality between V1 converters."""

  def __init__(self, experimental_debug_info_func):
    """Constructor for TFLiteConverter.

    Args:
      experimental_debug_info_func: An experimental function to retrieve the
        graph debug info for a set of nodes from the `graph_def`.
    """
    super(TFLiteConverterBaseV1, self).__init__()
    self.inference_type = _dtypes.float32
    self.inference_input_type = None
    self.inference_output_type = None
    self.output_format = constants.TFLITE
    self.quantized_input_stats = {}
    self.default_ranges_stats = None
    self.drop_control_dependency = True
    self.reorder_across_fake_quant = False
    self.change_concat_input_ranges = False
    self.dump_graphviz_dir = None
    self.dump_graphviz_video = False
    self.conversion_summary_dir = None
    self._debug_info_func = experimental_debug_info_func
    self._metadata.environment.apiVersion = 1

  # NOTE: these dunders intercept *every* attribute access on V1 converters to
  # keep the deprecated `post_training_quantize` / `target_ops` properties
  # working; they forward everything else to `object` unchanged.
  def __setattr__(self, name, value):
    if name == "post_training_quantize":
      warnings.warn(
          "Property %s is deprecated, "
          "please use optimizations=[Optimize.DEFAULT]"
          " instead." % name
      )
      if value:
        self.optimizations = [Optimize.DEFAULT]
      else:
        self.optimizations = []
      return
    if name == "target_ops":
      warnings.warn(
          "Property %s is deprecated, please use "
          "target_spec.supported_ops instead." % name
      )
      self.target_spec.supported_ops = value
      return
    object.__setattr__(self, name, value)

  def __getattribute__(self, name):
    if name == "post_training_quantize":
      warnings.warn(
          "Property %s is deprecated, "
          "please use optimizations=[Optimize.DEFAULT]"
          " instead." % name
      )
      return Optimize.DEFAULT in set(self.optimizations)
    if name == "target_ops":
      warnings.warn(
          "Property %s is deprecated, please use "
          "target_spec.supported_ops instead." % name
      )
      return self.target_spec.supported_ops
    return object.__getattribute__(self, name)

  def _validate_quantized_input_stats(self, converter_kwargs, quant_mode):
    """Ensure the `quantized_input_stats` flag is provided if required."""

    quantized_types = frozenset({_dtypes.int8, _dtypes.uint8})

    # Stats are only required for quantized inference types, and not when
    # post-training integer quantization supplies the ranges itself.
    requires_quantized_input_stats = (
        converter_kwargs["inference_type"] in quantized_types
        or converter_kwargs["inference_input_type"] in quantized_types
    ) and not quant_mode.is_post_training_integer_quantization()

    if (
        requires_quantized_input_stats
        and not converter_kwargs["quantized_input_stats"]
    ):
      raise ValueError(
          "The `quantized_input_stats` flag must be defined when either "
          "`inference_type` flag or `inference_input_type` flag is set to "
          "tf.int8 or tf.uint8. Currently, `inference_type={}` and "
          "`inference_input_type={}`.".format(
              _get_tf_type_name(converter_kwargs["inference_type"]),
              _get_tf_type_name(converter_kwargs["inference_input_type"]),
          )
      )

  @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS)
  def _validate_inputs(self, input_tensors, quantized_input_stats):
    """Validate input parameters.

    Args:
      input_tensors: List of input tensors.
      quantized_input_stats: Map of input tensor names to a tuple of floats
        representing the mean and standard deviation of the training data.

    Raises:
      ValueError:
        Input shape is not specified.
        Quantization input stats is required but not provided.
    """

    if not self._is_unknown_shapes_allowed() and self._has_valid_tensors():
      # Checks dimensions in input tensor.
      for tensor in input_tensors:
        shape = tensor.shape
        if not shape:
          raise ValueError(
              "Provide an input shape for input array '{0}'.".format(
                  _get_tensor_name(tensor)
              )
          )
        # Note that shape_list might be empty for scalar shapes.
        shape_list = shape.as_list()
        if None in shape_list[1:]:
          raise ValueError(
              "None is only supported in the 1st dimension. Tensor '{0}' has "
              "invalid shape '{1}'.".format(
                  _get_tensor_name(tensor), shape_list
              )
          )
        elif shape_list and shape_list[0] is None:
          # An unknown batch dimension is defaulted to 1 on this legacy path.
          self._set_batch_size(batch_size=1)

    # Get quantization stats. Ensures there is one stat per name if the stats
    # are specified.
    if quantized_input_stats:
      self._quantized_stats = []
      invalid_stats = []
      for name in self.get_input_arrays():
        if name in quantized_input_stats:
          self._quantized_stats.append(quantized_input_stats[name])
        else:
          invalid_stats.append(name)

      if invalid_stats:
        raise ValueError(
            "Quantization input stats are not available for input "
            "tensors '{0}'.".format(",".join(invalid_stats))
        )
    else:
      self._quantized_stats = None

  @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.OPTIMIZE_TF_MODEL)
  def _optimize_tf_model(
      self, graph_def, input_tensors, output_tensors, quant_mode
  ):
    """Run a Grappler pass to optimize the TensorFlow graph.

    Args:
      graph_def: Frozen GraphDef to be optimized.
      input_tensors: List of input tensors.
      output_tensors: List of output tensors.
      quant_mode: the quantization mode.

    Returns:
      The optimized TensorFlow graph.
    """
    # Disable grappler constant folding if there are training quant ops.
    if self.saved_model_dir or quant_mode.is_quantization_aware_trained_model():
      return graph_def

    try:
      # TODO(b/150163103): Merge `disabling lower using switch merge' calls.
      # Grappler will also try to lower while loop into switch merge
      # representation which is undesired for Ophints, so we simply remove
      # those attributes to prevent Grappler from doing so.
      graph = _convert_to_constants.disable_lower_using_switch_merge(graph_def)
      # Run function inlining optimization to ensure any models generated
      # through the from_frozen_graph path have been inlined.
      optimized_graph = _run_graph_optimizations(
          graph,
          input_tensors,
          output_tensors,
          config=self._grappler_config(["function"]),
      )
      return optimized_graph
    except Exception:  # pylint: disable=broad-except
      # Grappler is best-effort here: on any failure the unoptimized graph is
      # used as-is rather than aborting the conversion.
      return graph_def

  def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
      The converted data in serialized format, either a TFLite Flatbuffer or
      a Graphviz graph depending on value in `output_format`.

    Raises:
      ValueError:
        Input shape is not specified.
        None value for dimension in input_tensor.
    """
    self._validate_inputs(self._input_tensors, self.quantized_input_stats)

    quant_mode = QuantizationMode(
        self.optimizations,
        self.target_spec,
        self.representative_dataset,
        self._graph_def,
        self._experimental_disable_per_channel,
        self.experimental_new_dynamic_range_quantizer,
        self._experimental_low_bit_qat,
        self._experimental_full_integer_quantization_bias_type,
        self._experimental_variable_quantization,
    )

    optimized_graph = self._optimize_tf_model(
        self._graph_def, self._input_tensors, self._output_tensors, quant_mode
    )

    self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)

    # Assemble the converter flags: base args, then quantization flags, then
    # the V1-only flags (later updates win on key collisions).
    converter_kwargs = self._get_base_converter_args()
    converter_kwargs.update(
        quant_mode.converter_flags(
            self.inference_type, self.inference_input_type
        )
    )
    converter_kwargs.update({
        "output_format": self.output_format,
        "quantized_input_stats": self._quantized_stats,
        "default_ranges_stats": self.default_ranges_stats,
        "drop_control_dependency": self.drop_control_dependency,
        "reorder_across_fake_quant": self.reorder_across_fake_quant,
        "change_concat_input_ranges": self.change_concat_input_ranges,
        "dump_graphviz_dir": self.dump_graphviz_dir,
        "dump_graphviz_video": self.dump_graphviz_video,
        "conversion_summary_dir": self.conversion_summary_dir,
    })

    self._validate_quantized_input_stats(converter_kwargs, quant_mode)
    if not self.experimental_new_converter:
      logging.warning(
          "Please consider switching to the new converter by setting "
          "experimental_new_converter=True. "
          "The old converter is deprecated."
      )
    else:
      logging.info(
          "Using experimental converter: If you encountered a problem "
          "please file a bug. You can opt-out "
          "by setting experimental_new_converter=False"
      )
    # Converts model.
    if self._has_valid_tensors():
      result = _convert_graphdef(
          input_data=optimized_graph,
          input_tensors=self._input_tensors,
          output_tensors=self._output_tensors,
          **converter_kwargs,
      )
    else:
      # No tensor objects available (from_frozen_graph path): convert using
      # array names and shapes instead.
      result = _convert_graphdef_with_arrays(
          input_data=optimized_graph,
          input_arrays_with_shape=self._input_arrays_with_shape,
          output_arrays=self._output_arrays,
          control_output_arrays=self._control_output_arrays,
          **converter_kwargs,
      )

    return self._optimize_tflite_model(
        result,
        quant_mode,
        _build_conversion_flags(**converter_kwargs).debug_options,
        quant_io=self.experimental_new_quantizer,
    )

  def get_input_arrays(self):
    """Returns a list of the names of the input tensors.

    Returns:
      List of strings.
    """
    if self._has_valid_tensors():
      return [_get_tensor_name(tensor) for tensor in self._input_tensors]
    else:
      return [name for name, _ in self._input_arrays_with_shape]

  def _has_valid_tensors(self):
    """Checks if the input and output tensors have been initialized.

    Returns:
      Bool.
    """
    return self._input_tensors is not None and self._output_tensors

  def _set_batch_size(self, batch_size):
    """Sets the first dimension of the input tensor to `batch_size`.

    Args:
      batch_size: Batch size for the model. Replaces the first dimension of an
        input size array if undefined. (default 1)

    Raises:
      ValueError: input_tensor is not defined.
    """
    if not self._has_valid_tensors():
      raise ValueError(
          "The batch size cannot be set for this model. Please "
          "use input_shapes parameter."
      )

    for tensor in self._input_tensors:
      shape = tensor.shape.as_list()
      # Only an undefined leading dimension is replaced; known batch sizes
      # are left untouched.
      if shape[0] is None:
        shape[0] = batch_size
        tensor.set_shape(shape)

  def _is_unknown_shapes_allowed(self):
    # Ophint Converted nodes will need the shapes to be known.
+ if _is_ophint_converted(self._graph_def): + return False + + if not super(TFLiteConverterBaseV1, self)._is_unknown_shapes_allowed(): + return False + + # `conversion_summary_dir` calls the old converter. Unknown shapes are only + # supported by the MLIR converter. + if self.conversion_summary_dir: + logging.warning( + "`conversion_summary_dir` does not work with unknown shapes. " + "Graphs with unknown shapes might be different than when this flag " + "is disabled." + ) + return False + return True + + def _save_conversion_params_metric(self): + self._collected_converter_params.update({ + "output_format": self.output_format, + "default_ranges_stats": self.default_ranges_stats, + "drop_control_dependency": self.drop_control_dependency, + "reorder_across_fake_quant": self.reorder_across_fake_quant, + "change_concat_input_ranges": self.change_concat_input_ranges, + "dump_graphviz_dir": self.dump_graphviz_dir, + "dump_graphviz_video": self.dump_graphviz_video, + "conversion_summary_dir": self.conversion_summary_dir, + }) + super(TFLiteConverterBaseV1, self)._save_conversion_params_metric( + self._graph_def, self.inference_type, self.inference_input_type + ) + + +class TFLiteSavedModelConverter(TFLiteConverterBaseV1): + """Converts the given SavedModel into TensorFlow Lite model. + + Attributes: + saved_model_dir: Directory of the SavedModel. + """ + + def __init__( + self, + saved_model_dir, + saved_model_tags, + saved_model_exported_names, + experimental_debug_info_func=None, + ): + """Constructor for TFLiteConverter. + + Args: + saved_model_dir: Directory of the SavedModel. + saved_model_tags: Set of tags identifying the MetaGraphDef within the + SavedModel to analyze. All tags in the tag set must be present. (default + {tf.saved_model.SERVING}). + saved_model_exported_names: Names to be exported when the saved model + import path is on. 
+ experimental_debug_info_func: An experimental function to retrieve the + graph debug info for a set of nodes from the `graph_def`. + + Raises: + ValueError: Invalid arguments. + """ + super(TFLiteSavedModelConverter, self).__init__( + experimental_debug_info_func + ) + self.saved_model_dir = saved_model_dir + self._saved_model_tags = saved_model_tags + self._saved_model_exported_names = saved_model_exported_names + + if len(self._saved_model_exported_names) != 1: + raise ValueError("Only supports a single signature key.") + + signature_key = self._saved_model_exported_names[0] + + result = _freeze_saved_model( + self.saved_model_dir, + None, + None, + None, + self._saved_model_tags, + signature_key, + ) + self._graph_def = result[0] + self._input_tensors = result[1] + self._output_tensors = result[2] + self._parse_saved_model_args() + + @_export_metrics + def convert(self): + """Converts a TensorFlow GraphDef based on instance variables. + + Note that in the converted TensorFlow Lite model, the input tensor's order + might be changed each time `convert` is called. To access input tensor + information, please consider using the `SignatureRunner` API + (`interpreter.get_signature_runner`). + + Returns: + The converted data in serialized format, either a TFLite Flatbuffer or + a Graphviz graph depending on value in `output_format`. + + Raises: + ValueError: + Input shape is not specified. + None value for dimension in input_tensor. + """ + return super(TFLiteSavedModelConverter, self).convert() + + +class TFLiteKerasModelConverter(TFLiteConverterBaseV1): + """Converts the given SavedModel into TensorFlow Lite model.""" + + def __init__( + self, + model_file, + input_arrays=None, + input_shapes=None, + output_arrays=None, + custom_objects=None, + ): + """Constructor for TFLiteConverter. + + Args: + model_file: Full filepath of HDF5 file containing the tf.keras model. + input_arrays: List of input tensors to freeze graph with. 
Uses input + arrays from SignatureDef when none are provided. (default None) + input_shapes: Dict of strings representing input tensor names to list of + integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). + Automatically determined when input shapes is None (e.g., {"foo" : + None}). (default None) + output_arrays: List of output tensors to freeze graph with. Uses output + arrays from SignatureDef when none are provided. (default None) + custom_objects: Dict mapping names (strings) to custom classes or + functions to be considered during model deserialization. (default None) + + Raises: + ValueError: Invalid arguments. + """ + super(TFLiteKerasModelConverter, self).__init__( + experimental_debug_info_func=None + ) + # Handles Keras when Eager mode is enabled. + if context.executing_eagerly(): + if input_arrays or output_arrays: + raise ValueError( + "`input_arrays` and `output_arrays` are unsupported " + "with Eager mode. If your model requires any of these " + "parameters, please use disable_eager_execution()." + ) + + keras_model = keras_deps.get_load_model_function()( + model_file, custom_objects + ) + function = _trace_model_call(keras_model) + concrete_func = function.get_concrete_function() + + frozen_func = _convert_to_constants.convert_variables_to_constants_v2( + concrete_func, lower_control_flow=False + ) + _set_tensor_shapes(frozen_func.inputs, input_shapes) + self._keras_model = keras_model + self._graph_def = frozen_func.graph.as_graph_def() + self._input_tensors = frozen_func.inputs + self._output_tensors = frozen_func.outputs + self._debug_info_func = _build_debug_info_func(frozen_func.graph) + return + + # Handles Keras when Eager mode is disabled. + keras_deps.get_clear_session_function()() + keras_model = keras_deps.get_load_model_function()( + model_file, custom_objects + ) + sess = keras_deps.get_get_session_function()() + + # Get input and output tensors. 
+ if input_arrays: + input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays) + else: + input_tensors = keras_model.inputs + + if output_arrays: + output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays) + else: + output_tensors = keras_model.outputs + _set_tensor_shapes(input_tensors, input_shapes) + + graph_def = _freeze_graph(sess, input_tensors, output_tensors) + self._keras_model = keras_model + self._graph_def = graph_def + self._input_tensors = input_tensors + self._output_tensors = output_tensors + self._debug_info_func = _build_debug_info_func(sess.graph) + + @convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL) + def _freeze_keras_model(self, output_dir): + """Save Keras model to Saved Model format. + + Args: + output_dir: The output directory to save the SavedModel. + """ + try: + self._keras_model.save(output_dir, save_format="tf") + except Exception: # pylint: disable=broad-except + # When storing the given keras model to a saved model is failed, let's + # use original keras model conversion pipeline. + return None + tag_set = set([_tag_constants.SERVING]) + signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY + graph_def, input_tensors, output_tensors, sess_graph = _freeze_saved_model( + output_dir, None, None, None, tag_set, signature_key + ) + + self.saved_model_dir = output_dir + self._saved_model_tags = tag_set + self._saved_model_exported_names = [signature_key] + self._parse_saved_model_args() + if self.saved_model_dir: + self._graph_def = graph_def + self._input_tensors = input_tensors + self._output_tensors = output_tensors + self._debug_info_func = _build_debug_info_func(sess_graph) + + def _convert_as_saved_model(self): + """Converts a Keras model as a saved model. + + Returns: + The converted data in serialized format. 
+ """ + temp_dir = tempfile.mkdtemp() + try: + self._freeze_keras_model(temp_dir) + if self.saved_model_dir: + return super(TFLiteKerasModelConverter, self).convert() + finally: + shutil.rmtree(temp_dir, True) + + @_export_metrics + def convert(self): + """Converts a Keras model based on instance variables. + + Returns: + The converted data in serialized format, either a TFLite Flatbuffer or + a Graphviz graph depending on value in `output_format`. + + Raises: + ValueError: + Input shape is not specified. + None value for dimension in input_tensor. + """ + saved_model_convert_result = self._convert_as_saved_model() + if saved_model_convert_result: + return saved_model_convert_result + + return super(TFLiteKerasModelConverter, self).convert() + + +class TFLiteFrozenGraphConverter(TFLiteConverterBaseV1): + """Converts the given frozen graph def into TensorFlow Lite model.""" + + def __init__( + self, + graph_def, + input_tensors, + output_tensors, + input_arrays_with_shape=None, + output_arrays=None, + experimental_debug_info_func=None, + ): + """Constructor for TFLiteConverter. + + Args: + graph_def: Frozen TensorFlow GraphDef. + input_tensors: List of input tensors. Type and shape are computed using + `foo.shape` and `foo.dtype`. + output_tensors: List of output tensors (only .name is used from this). + input_arrays_with_shape: Tuple of strings representing input tensor names + and list of integers representing input shapes (e.g., [("foo", [1, 16, + 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when + `input_tensors` and `output_tensors` are None. (default None) + output_arrays: List of output tensors to freeze graph with. Use only when + graph cannot be loaded into TensorFlow and when `input_tensors` and + `output_tensors` are None. (default None) + experimental_debug_info_func: An experimental function to retrieve the + graph debug info for a set of nodes from the `graph_def`. + + Raises: + ValueError: Invalid arguments. 
+ """ + super(TFLiteFrozenGraphConverter, self).__init__( + experimental_debug_info_func + ) + self._graph_def = graph_def + self._input_tensors = input_tensors + self._output_tensors = output_tensors + self._control_output_arrays = None + + # Attributes are used by models that cannot be loaded into TensorFlow. + if not self._has_valid_tensors(): + self._input_arrays_with_shape = input_arrays_with_shape + self._output_arrays = output_arrays + + if input_tensors is not None and input_arrays_with_shape is not None: + logging.warning( + "input_arrays_with_shape will be ignored when both the " + "given input_tensors and input_arrays_with_shape are not " + "None." + ) + + if output_tensors is not None and output_arrays is not None: + logging.warning( + "output_arrays will be ignored when both the given " + "output_tensors and output_arrays are not None." + ) + + @_export_metrics + def convert(self): + """Converts a TensorFlow GraphDef based on instance variables. + + Returns: + The converted data in serialized format, either a TFLite Flatbuffer or + a Graphviz graph depending on value in `output_format`. + + Raises: + ValueError: + Input shape is not specified. + None value for dimension in input_tensor. + """ + if not self._has_valid_tensors(): + if not self._input_arrays_with_shape or not ( + self._output_arrays or self._control_output_arrays + ): + raise ValueError( + "If input_tensors and output_tensors are None, both " + "input_arrays_with_shape and output_arrays|control_output_arrays " + "must be defined." + ) + return super(TFLiteFrozenGraphConverter, self).convert() + + +@_tf_export(v1=["lite.TFLiteConverter"]) +class TFLiteConverter(TFLiteFrozenGraphConverter): + """Convert a TensorFlow model into `output_format`. + + This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras + model into either a TFLite FlatBuffer or graph visualization. + + Attributes: + optimizations: Experimental flag, subject to change. Set of optimizations to + apply. 
e.g {tf.lite.Optimize.DEFAULT}. (default None, must be None or a + set of values of type `tf.lite.Optimize`) + representative_dataset: A generator function used for integer quantization + where each generated sample has the same order, type and shape as the + inputs to the model. Usually, this is a small subset of a few hundred + samples randomly chosen, in no particular order, from the training or + evaluation dataset. This is an optional attribute, but required for full + integer quantization, i.e, if `tf.int8` is the only supported type in + `target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`. + (default None) + target_spec: Experimental flag, subject to change. Specifications of target + device, including supported ops set, supported types and a set of user's + defined TensorFlow operators required in the TensorFlow Lite runtime. + Refer to `tf.lite.TargetSpec`. + inference_type: Data type of numeric arrays, excluding the input layer. + (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) + inference_input_type: Data type of the numeric arrays in the input layer. If + `inference_input_type` is in {tf.int8, tf.uint8}, then + `quantized_input_stats` must be provided. (default is the value assigned + to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8}) + inference_output_type: Data type of the numeric arrays in the output layer. + (default is the value assigned to `inference_type`, must be in + {tf.float32, tf.int8, tf.uint8}) + quantized_input_stats: Map of input tensor names to a tuple of floats + representing the mean and standard deviation of the training data. (e.g., + {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8 or + tf.uint8. (default None) + default_ranges_stats: Tuple of integers (min, max) representing range values + for all numeric arrays without a specified range. Intended for + experimenting with quantization via "dummy quantization". 
(default None) + allow_custom_ops: Boolean indicating whether to allow custom operations. + When False any unknown operation is an error. When True, custom ops are + created for any op that is unknown. The developer will need to provide + these to the TensorFlow Lite runtime with a custom resolver. (default + False) + drop_control_dependency: Boolean indicating whether to drop control + dependencies silently. This is due to TFLite not supporting control + dependencies. (default True) + reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant + nodes in unexpected locations. Used when the location of the FakeQuant + nodes is preventing graph transformations necessary to convert the graph. + Results in a graph that differs from the quantized training graph, + potentially causing differing arithmetic behavior. (default False) + change_concat_input_ranges: Boolean to change behavior of min/max ranges for + inputs and outputs of the concat operator for quantized models. Changes + the ranges of concat operator overlap when true. (default False) + output_format: Output file format. (default + tf.compat.v1.lite.constants.TFLITE, must be in + {tf.compat.v1.lite.constants.TFLITE, + tf.compat.v1.lite.constants.GRAPHVIZ_DOT}) + dump_graphviz_dir: Full filepath of folder to dump the graphs at various + stages of processing GraphViz .dot files. Preferred over + `output_format=tf.compat.v1.lite.constants.GRAPHVIZ_DOT` in order to keep + the requirements of the output file. (default None) + dump_graphviz_video: Boolean indicating whether to dump the GraphViz .dot + files after every graph transformation. Requires the `dump_graphviz_dir` + flag to be specified. (default False) + conversion_summary_dir: Full path of the directory to store conversion logs. + (default None) + exclude_conversion_metadata: Whether not to embed the conversion metadata + into the converted model. (default False) + target_ops: Deprecated. Please use `target_spec.supported_ops` instead. 
+ post_training_quantize: Deprecated. Please use `optimizations` instead and + set it to `{tf.lite.Optimize.DEFAULT}`. (default False) + experimental_new_converter: Experimental flag, subject to change. Enables + MLIR-based conversion. (default True) + experimental_new_quantizer: Experimental flag, subject to change. Enables + MLIR-based quantization conversion instead of Flatbuffer-based conversion. + (default True) Example usage: ```python # Converting a GraphDef from + session. converter = tf.compat.v1.lite.TFLiteConverter.from_session( sess, + in_tensors, out_tensors) tflite_model = converter.convert() + open("converted_model.tflite", "wb").write(tflite_model) # Converting a + GraphDef from file. converter = + tf.compat.v1.lite.TFLiteConverter.from_frozen_graph( graph_def_file, + input_arrays, output_arrays) tflite_model = converter.convert() + open("converted_model.tflite", "wb").write(tflite_model) # Converting a + SavedModel. converter = + tf.compat.v1.lite.TFLiteConverter.from_saved_model( saved_model_dir) + tflite_model = converter.convert() open("converted_model.tflite", + "wb").write(tflite_model) # Converting a tf.keras model. converter = + tf.compat.v1.lite.TFLiteConverter.from_keras_model_file( keras_model) + tflite_model = converter.convert() open("converted_model.tflite", + "wb").write(tflite_model) ``` + """ + + # pylint: disable=useless-super-delegation + def __init__( + self, + graph_def, + input_tensors, + output_tensors, + input_arrays_with_shape=None, + output_arrays=None, + experimental_debug_info_func=None, + ): + """Constructor for TFLiteConverter. + + Args: + graph_def: Frozen TensorFlow GraphDef. + input_tensors: List of input tensors. Type and shape are computed using + `foo.shape` and `foo.dtype`. + output_tensors: List of output tensors (only .name is used from this). + input_arrays_with_shape: Tuple of strings representing input tensor names + and list of integers representing input shapes (e.g., [("foo" : [1, 16, + 16, 3])]). 
Use only when graph cannot be loaded into TensorFlow and when + `input_tensors` and `output_tensors` are None. (default None) + output_arrays: List of output tensors to freeze graph with. Use only when + graph cannot be loaded into TensorFlow and when `input_tensors` and + `output_tensors` are None. (default None) + experimental_debug_info_func: An experimental function to retrieve the + graph debug info for a set of nodes from the `graph_def`. + + Raises: + ValueError: Invalid arguments. + """ + super(TFLiteConverter, self).__init__( + graph_def, + input_tensors, + output_tensors, + input_arrays_with_shape, + output_arrays, + experimental_debug_info_func, + ) + + @classmethod + def from_session(cls, sess, input_tensors, output_tensors): + """Creates a TFLiteConverter class from a TensorFlow Session. + + Args: + sess: TensorFlow Session. + input_tensors: List of input tensors. Type and shape are computed using + `foo.shape` and `foo.dtype`. + output_tensors: List of output tensors (only .name is used from this). + + Returns: + TFLiteConverter class. + """ + # pylint: disable=protected-access + TFLiteConverterBase._set_original_model_type( + conversion_metadata_fb.ModelType.TF_SESSION + ) + # pylint: enable=protected-access + graph_def = _freeze_graph(sess, input_tensors, output_tensors) + return cls( + graph_def, + input_tensors, + output_tensors, + experimental_debug_info_func=_build_debug_info_func(sess.graph), + ) + + @classmethod + def from_frozen_graph( + cls, graph_def_file, input_arrays, output_arrays, input_shapes=None + ): + """Creates a TFLiteConverter class from a file containing a frozen GraphDef. + + Args: + graph_def_file: Full filepath of file containing frozen GraphDef. + input_arrays: List of input tensors to freeze graph with. + output_arrays: List of output tensors to freeze graph with. + input_shapes: Dict of strings representing input tensor names to list of + integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). 
+ Automatically determined when input shapes is None (e.g., {"foo" : + None}). (default None) + + Returns: + TFLiteConverter class. + + Raises: + IOError: + File not found. + Unable to parse input file. + ValueError: + The graph is not frozen. + input_arrays or output_arrays contains an invalid tensor name. + input_shapes is not correctly defined when required + """ + # pylint: disable=protected-access + TFLiteConverterBase._set_original_model_type( + conversion_metadata_fb.ModelType.TF_GRAPH_DEF + ) + # pylint: enable=protected-access + with _ops.Graph().as_default(): + with _session.Session() as sess: + # Read GraphDef from file. + if not gfile.Exists(graph_def_file): + raise IOError("File '{0}' does not exist.".format(graph_def_file)) + with gfile.GFile(graph_def_file, "rb") as f: + file_content = f.read() + + try: + graph_def = _graph_pb2.GraphDef() + graph_def.ParseFromString(file_content) + except (_text_format.ParseError, DecodeError): + try: + print("Ignore 'tcmalloc: large alloc' warnings.") + + if not isinstance(file_content, str): + file_content = file_content.decode("utf-8") + graph_def = _graph_pb2.GraphDef() + _text_format.Merge(file_content, graph_def) + except (_text_format.ParseError, DecodeError): + raise IOError( + "Unable to parse input file '{}'.".format(graph_def_file) + ) + + if sys.byteorder == "big": + bst.swap_tensor_content_in_graph_node(graph_def, "little", "big") + + # Handles models with custom TFLite ops that cannot be resolved in + # TensorFlow. + load_model_in_session = True + try: + _import_graph_def(graph_def, name="") + except _NotFoundError: + load_model_in_session = False + + if load_model_in_session: + # Check if graph is frozen. + if not _is_frozen_graph(sess): + raise ValueError("Please freeze the graph using freeze_graph.py.") + + # Get input and output tensors. 
+ input_tensors = _get_tensors_from_tensor_names( + sess.graph, input_arrays + ) + output_tensors = _get_tensors_from_tensor_names( + sess.graph, output_arrays + ) + _set_tensor_shapes(input_tensors, input_shapes) + + return cls(sess.graph_def, input_tensors, output_tensors) + else: + if not input_shapes: + raise ValueError("input_shapes must be defined for this model.") + if set(input_arrays) != set(input_shapes.keys()): + raise ValueError( + "input_shapes must contain a value for each item " + "in input_array." + ) + + input_arrays_with_shape = [ + (name, input_shapes[name]) for name in input_arrays + ] + return cls( + graph_def, + input_tensors=None, + output_tensors=None, + input_arrays_with_shape=input_arrays_with_shape, + output_arrays=output_arrays, + ) + + @classmethod + def from_saved_model( + cls, + saved_model_dir, + input_arrays=None, + input_shapes=None, + output_arrays=None, + tag_set=None, + signature_key=None, + ): + """Creates a TFLiteConverter class from a SavedModel. + + Args: + saved_model_dir: SavedModel directory to convert. + input_arrays: List of input tensors to freeze graph with. Uses input + arrays from SignatureDef when none are provided. (default None) + input_shapes: Dict of strings representing input tensor names to list of + integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). + Automatically determined when input shapes is None (e.g., {"foo" : + None}). (default None) + output_arrays: List of output tensors to freeze graph with. Uses output + arrays from SignatureDef when none are provided. (default None) + tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to + analyze. All tags in the tag set must be present. (default + {tf.saved_model.SERVING}) + signature_key: Key identifying SignatureDef containing inputs and outputs. + (default tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY) + + Returns: + TFLiteConverter class. 
+ """ + # pylint: disable=protected-access + TFLiteConverterBase._set_original_model_type( + conversion_metadata_fb.ModelType.TF_SAVED_MODEL + ) + # pylint: enable=protected-access + if tag_set is None: + tag_set = set([_tag_constants.SERVING]) + if signature_key is None: + signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY + + saved_model_converter = TFLiteSavedModelConverter( + saved_model_dir, tag_set, [signature_key] + ) + if saved_model_converter.saved_model_dir: + return saved_model_converter + + result = _freeze_saved_model( + saved_model_dir, + input_arrays, + input_shapes, + output_arrays, + tag_set, + signature_key, + ) + + return cls( + graph_def=result[0], + input_tensors=result[1], + output_tensors=result[2], + experimental_debug_info_func=_build_debug_info_func(result[3]), + ) + + @classmethod + def from_keras_model_file( + cls, + model_file, + input_arrays=None, + input_shapes=None, + output_arrays=None, + custom_objects=None, + ): + """Creates a TFLiteConverter class from a tf.keras model file. + + Args: + model_file: Full filepath of HDF5 file containing the tf.keras model. + input_arrays: List of input tensors to freeze graph with. Uses input + arrays from SignatureDef when none are provided. (default None) + input_shapes: Dict of strings representing input tensor names to list of + integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}). + Automatically determined when input shapes is None (e.g., {"foo" : + None}). (default None) + output_arrays: List of output tensors to freeze graph with. Uses output + arrays from SignatureDef when none are provided. (default None) + custom_objects: Dict mapping names (strings) to custom classes or + functions to be considered during model deserialization. (default None) + + Returns: + TFLiteConverter class. 
+ """ + # pylint: disable=protected-access + TFLiteConverterBase._set_original_model_type( + conversion_metadata_fb.ModelType.KERAS_MODEL + ) + # pylint: enable=protected-access + return TFLiteKerasModelConverter( + model_file, input_arrays, input_shapes, output_arrays, custom_objects + ) + + # pylint: disable=useless-super-delegation + def convert(self): + """Converts a TensorFlow GraphDef based on instance variables. + + Returns: + The converted data in serialized format, either a TFLite Flatbuffer or + a Graphviz graph depending on value in `output_format`. + + Raises: + ValueError: + Input shape is not specified. + None value for dimension in input_tensor. + """ + return super(TFLiteConverter, self).convert() + + +@_tf_export(v1=["lite.TocoConverter"]) +class TocoConverter: + """Convert a TensorFlow model into `output_format`. + + This class has been deprecated. Please use `lite.TFLiteConverter` instead. + """ + + @classmethod + @_deprecation.deprecated( + None, "Use `lite.TFLiteConverter.from_session` instead." + ) + def from_session(cls, sess, input_tensors, output_tensors): + """Creates a TocoConverter class from a TensorFlow Session.""" + return TFLiteConverter.from_session(sess, input_tensors, output_tensors) + + @classmethod + @_deprecation.deprecated( + None, "Use `lite.TFLiteConverter.from_frozen_graph` instead." + ) + def from_frozen_graph( + cls, graph_def_file, input_arrays, output_arrays, input_shapes=None + ): + """Creates a TocoConverter class from a file containing a frozen graph.""" + return TFLiteConverter.from_frozen_graph( + graph_def_file, input_arrays, output_arrays, input_shapes + ) + + @classmethod + @_deprecation.deprecated( + None, "Use `lite.TFLiteConverter.from_saved_model` instead." 
+ ) + def from_saved_model( + cls, + saved_model_dir, + input_arrays=None, + input_shapes=None, + output_arrays=None, + tag_set=None, + signature_key=None, + ): + """Creates a TocoConverter class from a SavedModel.""" + return TFLiteConverter.from_saved_model( + saved_model_dir, + input_arrays, + input_shapes, + output_arrays, + tag_set, + signature_key, + ) + + @classmethod + @_deprecation.deprecated( + None, "Use `lite.TFLiteConverter.from_keras_model_file` instead." + ) + def from_keras_model_file( + cls, model_file, input_arrays=None, input_shapes=None, output_arrays=None + ): + """Creates a TocoConverter class from a tf.keras model file.""" + return TFLiteConverter.from_keras_model_file( + model_file, input_arrays, input_shapes, output_arrays + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite_constants.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..4fc63f79f8c0c5bc5975d4fdedf3dee084a9eff1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/lite_constants.py @@ -0,0 +1,90 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Constants for TFLite.""" + +from tensorflow.compiler.mlir.lite import converter_flags_pb2 as _converter_flags_pb2 +from tensorflow.python.framework import dtypes +from tensorflow.python.util.all_util import remove_undocumented +from tensorflow.python.util.tf_export import tf_export as _tf_export + +FLOAT = dtypes.float32 +FLOAT16 = dtypes.float16 +INT32 = dtypes.int32 +INT64 = dtypes.int64 +STRING = dtypes.string +QUANTIZED_UINT8 = dtypes.uint8 +INT8 = dtypes.int8 +INT16 = dtypes.int16 +COMPLEX64 = dtypes.complex64 +TENSORFLOW_GRAPHDEF = _converter_flags_pb2.TENSORFLOW_GRAPHDEF +TFLITE = _converter_flags_pb2.TFLITE +GRAPHVIZ_DOT = _converter_flags_pb2.GRAPHVIZ_DOT +UNSET = _converter_flags_pb2.ConverterFlags.ModelOriginFramework.Name( + _converter_flags_pb2.ConverterFlags.UNSET +) +TENSORFLOW = _converter_flags_pb2.ConverterFlags.ModelOriginFramework.Name( + _converter_flags_pb2.ConverterFlags.TENSORFLOW +) +KERAS = _converter_flags_pb2.ConverterFlags.ModelOriginFramework.Name( + _converter_flags_pb2.ConverterFlags.KERAS +) +JAX = _converter_flags_pb2.ConverterFlags.ModelOriginFramework.Name( + _converter_flags_pb2.ConverterFlags.JAX +) +PYTORCH = _converter_flags_pb2.ConverterFlags.ModelOriginFramework.Name( + _converter_flags_pb2.ConverterFlags.PYTORCH +) + +_tf_export(v1=["lite.constants.FLOAT"]).export_constant(__name__, "FLOAT") +_tf_export(v1=["lite.constants.FLOAT16"]).export_constant(__name__, "FLOAT16") +_tf_export(v1=["lite.constants.INT32"]).export_constant(__name__, "INT32") +_tf_export(v1=["lite.constants.INT64"]).export_constant(__name__, "INT64") +_tf_export(v1=["lite.constants.STRING"]).export_constant(__name__, "STRING") +_tf_export(v1=["lite.constants.QUANTIZED_UINT8"]).export_constant( + __name__, "QUANTIZED_UINT8") +_tf_export(v1=["lite.constants.INT8"]).export_constant(__name__, "INT8") +_tf_export(v1=["lite.constants.INT16"]).export_constant(__name__, 
"INT16") +_tf_export(v1=["lite.constants.TFLITE"]).export_constant(__name__, "TFLITE") +_tf_export(v1=["lite.constants.GRAPHVIZ_DOT"]).export_constant( + __name__, "GRAPHVIZ_DOT") + +# Currently the default mode of operation is to shell to another python process +# to protect against crashes. However, it breaks some dependent targets because +# it forces us to depend on an external py_binary. The experimental API doesn't +# have that drawback. +EXPERIMENTAL_USE_TOCO_API_DIRECTLY = False + + +_allowed_symbols = [ + "FLOAT", + "FLOAT16", + "INT32", + "INT64", + "STRING", + "QUANTIZED_UINT8", + "INT8", + "INT16", + "COMPLEX64", + "TENSORFLOW_GRAPHDEF", + "TFLITE", + "GRAPHVIZ_DOT", + "UNSET", + "TENSORFLOW", + "KERAS", + "JAX", + "PYTORCH", + "EXPERIMENTAL_USE_TOCO_API_DIRECTLY", +] +remove_undocumented(__name__, _allowed_symbols) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde919a658e14da06ab2c1c357a1b301709a0ffe Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..6dd972d7326863340045e0964dfce106d5532a34 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics_interface.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f9e2cf39f269894370ae9459998e31fe17f9447 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/__pycache__/metrics_interface.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.pyi new file mode 100644 index 0000000000000000000000000000000000000000..79b8ac2ac6314c6d18b56335efa4c728748b6815 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.pyi @@ -0,0 +1,18 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class MetricsWrapper: + def __init__(self, arg0: str) -> None: ... + def ExportMetrics(self) -> object: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.so new file mode 100644 index 0000000000000000000000000000000000000000..417abb4a35fe8baff83afbabd56742b9b86290dc --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/_pywrap_tensorflow_lite_metrics_wrapper.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c1bdc95546e0de97871d1003287aea7e5d0967a89b30dcb3d3af0178c4d9044 +size 3773272 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..f21859b610696b5f0c263ffdb8d81a7e7ddd7734 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics.py @@ -0,0 +1,70 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python TFLite metrics helper.""" +import os +from typing import Optional, Text + +# pylint: disable=g-import-not-at-top +if not os.path.splitext(__file__)[0].endswith( + os.path.join('tflite_runtime', 'metrics_portable')): + # This file is part of tensorflow package. + from tensorflow.lite.python.metrics import metrics_interface # type: ignore +else: + # This file is part of tflite_runtime package. + from tflite_runtime import metrics_interface # type: ignore +# pylint: enable=g-import-not-at-top + + +class TFLiteMetrics(metrics_interface.TFLiteMetricsInterface): + """TFLite metrics helper.""" + + def __init__(self, + model_hash: Optional[Text] = None, + model_path: Optional[Text] = None) -> None: + pass + + def increase_counter_debugger_creation(self): + pass + + def increase_counter_interpreter_creation(self): + pass + + def increase_counter_converter_attempt(self): + pass + + def increase_counter_converter_success(self): + pass + + def set_converter_param(self, name, value): + pass + + def set_converter_error(self, error_data): + pass + + def set_converter_latency(self, value): + pass + + +class TFLiteConverterMetrics(TFLiteMetrics): + """Similar to TFLiteMetrics but specialized for converter.""" + + def __del__(self): + pass + + def set_export_required(self): + pass + + def export_metrics(self): + pass diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics_interface.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..d0f38f3d40d400d52c315a2a7db129807e099161 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/metrics_interface.py 
@@ -0,0 +1,48 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python TFLite metrics helper interface.""" +import abc + + +class TFLiteMetricsInterface(metaclass=abc.ABCMeta): + """Abstract class for TFLiteMetrics.""" + + @abc.abstractmethod + def increase_counter_debugger_creation(self): + raise NotImplementedError + + @abc.abstractmethod + def increase_counter_interpreter_creation(self): + raise NotImplementedError + + @abc.abstractmethod + def increase_counter_converter_attempt(self): + raise NotImplementedError + + @abc.abstractmethod + def increase_counter_converter_success(self): + raise NotImplementedError + + @abc.abstractmethod + def set_converter_param(self, name, value): + raise NotImplementedError + + @abc.abstractmethod + def set_converter_error(self, error_data): + raise NotImplementedError + + @abc.abstractmethod + def set_converter_latency(self, value): + raise NotImplementedError diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..549e8a4f0c3fbe5f5a52ebf9264f4449a0d7c0bb Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/metrics_wrapper.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/metrics_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09cf288eb2569d5f7f1031a5c60d3978df4f540a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/__pycache__/metrics_wrapper.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/metrics_wrapper.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/metrics_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..1a9778246d4a432960d0f5eb88bae0b69df08437 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/metrics/wrapper/metrics_wrapper.py @@ -0,0 +1,34 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Stub to make pywrap metrics wrapper accessible.""" + +from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2 +from tensorflow.compiler.mlir.lite.python import wrap_converter +from tensorflow.lite.python.metrics._pywrap_tensorflow_lite_metrics_wrapper import MetricsWrapper # pylint: disable=unused-import + + +def retrieve_collected_errors(): + """Returns and clears the list of collected errors in ErrorCollector. + + The RetrieveCollectedErrors function in C++ returns a list of serialized proto + messages. This function will convert them to ConverterErrorData instances. + + Returns: + A list of ConverterErrorData. + """ + serialized_message_list = wrap_converter.wrapped_retrieve_collected_errors() + return list( + map(converter_error_data_pb2.ConverterErrorData.FromString, + serialized_message_list)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/op_hint.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/op_hint.py new file mode 100644 index 0000000000000000000000000000000000000000..6f0201bc219ec2b93144cd42db55b9848e4995c6 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/op_hint.py @@ -0,0 +1,1338 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Define tflite op hints (intrinsic operations). + +This essentially allows defining a TensorFlow API for tflite operations in +Python with hints on how they are represented in TensorFlow Lite. This basically +is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution +graph and is useful for LSTMs and other complicated TensorFlow constructions +that are difficult to pattern match in TOCO, but are represented by a single +accelerated tflite op. + +Example: + def tflite_cool_activation(input): + # A cool activation function. + custom = tf.lite.OpHint("cool_activation") + input, = custom.add_inputs(input) + output = tf.sigmoid(input) * input + output, = custom.add_outputs(output) + return output + + image = tf.compat.v1.placeholder(tf.float32, (1, 16, 16, 1)) + output = tf.identity(tflite_cool_activation(image)) + + session = tf.compat.v1.Session() + + graphdef_to_convert = tf.lite.experimental.convert_op_hints_to_stubs(session) + tflite_graph = tf.compat.v1.lite.toco_convert( + graphdef_to_convert, [image], [output], allow_custom_ops=True) + with open("/tmp/graph.fb", "wb") as fp: + fp.write(tflite_graph) + +How does it work?: + +OpHint is a helper that you use when defining a vanilla python function. +It allows you to wrap arguments with tf.identities with some custom attributes. +These attributes allow you to find the original block of ops that was created. 
+For example, if you use cool_activation above you essentially get: + +a_input = tf.identity() +result = tf.multiply(tf.sigmoid(a_input), a_input) +output = tf.identity() + +a_input, output are identities that have parameters representing +what argument they are, what the name of the function they should turn into +in tf lite as well as a guid that uniquely identifies a particular invocation. + +Once you have built your whole tensorflow graph, you can run it and train it +as usual, but after you have done that, you need to convert the graph into +a form that replaces these subgraphs wrapped in identities to stub ops. These +ops don't actually exist in the normal TensorFlow runtime, but will be +understood by toco later. The generated TensorFlow Lite flatbuffer file will +contain a custom operator called "cool_activation". Developer needs to implement +and register this operator in TensorFlow Lite in order to do inference. +""" + +import collections as _collections +import copy as _copy +import json as _json +import uuid as _uuid + +from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2 +from tensorflow.core.framework import graph_pb2 as _graph_pb2 +from tensorflow.core.framework import node_def_pb2 as _node_def_pb2 +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import tensor_util as _tensor_util +from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes +from tensorflow.python.framework.graph_util_impl import _extract_graph_summary +from tensorflow.python.ops import array_ops as _array_ops +from tensorflow.python.util import compat as _compat +from tensorflow.python.util import deprecation as _deprecation +from tensorflow.python.util.all_util import remove_undocumented +from tensorflow.python.util.tf_export import tf_export as _tf_export + + +@_tf_export(v1=["lite.OpHint"]) +@_deprecation.deprecated( + None, + "Please follow 
instructions under " + "https://www.tensorflow.org/lite/convert/operation_fusion for operation" + "fusion in tflite." +) +class OpHint: + """A class that helps build tflite function invocations. + + It allows you to take a bunch of TensorFlow ops and annotate the construction + such that toco knows how to convert it to tflite. This embeds a pseudo + function in a TensorFlow graph. This allows embedding high-level API usage + information in a lower level TensorFlow implementation so that an alternative + implementation can be substituted later. + + Essentially, any "input" into this pseudo op is fed into an identity, and + attributes are added to that input before being used by the constituent ops + that make up the pseudo op. A similar process is done to any output that + is to be exported from the current op. + + """ + # Attr constants that are used for representation in the GraphDef. These + # will be used on every Identity op that is involved in a total OpHint. + + # Name of the OpHint function (cosmetic). + FUNCTION_NAME_ATTR = "_tflite_function_name" + # UUID of the function (each OpHint gets a new uuid). + FUNCTION_UUID_ATTR = "_tflite_function_uuid" + # The input index of the input (or nothing if it is an output). + FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index" + # The output index of the output (or nothing if it is an input). + FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index" + # An index that orders aggregate arguments. Aggregate arguments are ones + # that are separate but will be fused horizontally. For example a static LSTM + # has a lstm cell for each time step. Each one has a separate opHint, but a + # fused SequentialLSTM will treat this as a single tensor. + FUNCTION_SORT_INDEX_ATTR = "_tflite_function_sort_index" + # The way in which multiple parts of the aggregate argument will be joined + # into a fused operand. Valid options are OpHint.AGGREGATE_FIRST, + # OpHint.AGGREGATE_LAST, OpHint.AGGREGATE_STACK. 
+ FUNCTION_AGGREGATE_ATTR = "_tflite_function_aggregate" + # On fused OpHint stub, the order of inputs that the final LSTM call will + # have. What this means is that the TensorFlow order might be + # "foo", "bar", "stuff" and you might want the TF lite op order to be + # "stuff", "foo", "bar", -1 (where -1 is unused). So you would set this + # attribute to [2, 0, 1, -1]. + TFLITE_INPUT_INDICES = "_tflite_input_indices" + # OpHint level. + FUNCTION_LEVEL_ATTR = "_tflite_ophint_level" + # Ophint internal mapping, this is for high level Ophint only. + # This basically contains three kinds of mapping: + # 1) How parental ophinted inputs map to the first child ophinted inputs; + # 2) How internal children nodes are connected; + # 3) How parental ophinted outputs map to the last child ophinted outputs. + CHILDREN_INPUTS_MAPPINGS = "_tflite_children_ophint_inputs_mapping" + + # Types of aggregations + # stack: stacks all ophints with matching tags. i.e. for a static rnn. + # specifically, this is good for an input or output to a static rnn cell. + AGGREGATE_STACK = "stack" + # first: only takes the first output (one with lowest sort index) + # of matching tags. This is good for the input state to an RNN. + AGGREGATE_FIRST = "first" + # aggregation last takes only the last tag (one with highest sort index). + # This is good for an output value on the last stack item of a + # static rnn. + AGGREGATE_LAST = "last" + + class OpHintArgumentTracker: + """Conceptually tracks indices of arguments of "OpHint functions". + + The inputs and arguments of these functions both use an instance + of the class so they can have independent numbering. + """ + + def __init__(self, + function_name, + unique_function_id, + node_name_prefix, + attr_name, + level=1, + children_inputs_mappings=None): + """Initialize ophint argument. + + Args: + function_name: Name of the function that this tracks arguments for. + unique_function_id: UUID of function that this tracks arguments for. 
+ node_name_prefix: How identities that are created are named. + attr_name: Name of attribute to use to store the index for this hint. + i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX + level: Hierarchical level of the Ophint node, a number. + children_inputs_mappings: Inputs/Outputs mapping for children hints. + """ + + # The global index is the argument index of the op. This is in contrast + # to the sort index which is the sequence number of a particular instance + # of a given global index. For example, you may have called add hint + # twice with the tag "foo". Then the global index will be 0 for both + # and the sort index will be 0 for the first added and 1 for the second. + self._function_name = function_name + self._unique_function_id = unique_function_id + self._next_global_index = 0 # The absolute global index + self._used_global_indices = set() + self._tag_to_global_index = {} # The argument index a given tag maps to + self._tag_to_next_sort_index = {} # The current index for each tag + self._node_name_prefix = node_name_prefix + self._attr_name = attr_name + self._level = level + self._children_inputs_mappings = children_inputs_mappings + + def _get_new_global_index(self, index_override): + """Return the next unused argument index in order or use an override. + + Args: + index_override: An index to use instead of the next available or None + to use the next available. + + Returns: + A valid global_index to use for the next hint argument. + + Raises: + ValueError: If the index_override is already used by another hint. 
+ """ + if index_override is None: + global_index = self._next_global_index + else: + if index_override in self._used_global_indices: + raise ValueError("Index %d was already used by another call to add") + global_index = index_override + # Make next_global_index valid + self._used_global_indices.add(global_index) + while self._next_global_index in self._used_global_indices: + self._next_global_index += 1 + return global_index + + def add(self, arg, tag=None, name=None, aggregate=None, + index_override=None): + """Return a wrapped tensor of an input tensor as an argument. + + Args: + arg: A TensorFlow tensor that should be considered an argument. + tag: String tag to identify arguments that should be packed. + name: Name of argument. This is included in the Identity hint op names. + aggregate: Strategy to aggregate. + Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, + and OpHint.AGGREGATE_STACK. + Note, aggregate is only valid if tag is specified. + index_override: Specify what input/output index should this be in the + final stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the + final stub be as stub_func(inputs[arg1, arg0], outputs=[]) rather than + the default call order based ordering. + + Returns: + A tensor representing the wrapped argument. + + Raises: + ValueError: When indices are not consistent. 
+ """ + + # Find the appropriate index + if tag is None: + if aggregate is not None: + raise ValueError("You must specify `tag` if using aggregate.") + global_index = self._get_new_global_index(index_override) + sort_index = None + else: + if aggregate is None: + raise ValueError("You must specify `aggregate` if using tag.") + if tag not in self._tag_to_global_index: + self._tag_to_global_index[tag] = ( + self._get_new_global_index(index_override)) + self._tag_to_next_sort_index[tag] = 0 + elif (index_override and + index_override != self._tag_to_global_index[tag]): + raise ValueError( + "Tag %r was called with two indices %r and %r" % + (tag, index_override, self._tag_to_global_index[tag])) + global_index = self._tag_to_global_index[tag] + sort_index = self._tag_to_next_sort_index[tag] + self._tag_to_next_sort_index[tag] += 1 + + uuid = self._unique_function_id + name = "%s-%s-%s-%r-%r-%s" % (self._node_name_prefix, self._function_name, + uuid, global_index, sort_index, name) + + identity_op = _array_ops.identity(arg, name=name) + + # pylint: disable=protected-access + identity_op.op._set_attr( + OpHint.FUNCTION_NAME_ATTR, + _attr_value_pb2.AttrValue( + s=_compat.as_bytes(self._function_name))) + identity_op.op._set_attr( + OpHint.FUNCTION_UUID_ATTR, + _attr_value_pb2.AttrValue( + s=_compat.as_bytes(self._unique_function_id))) + identity_op.op._set_attr( + self._attr_name, _attr_value_pb2.AttrValue(i=global_index)) + identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR, + _attr_value_pb2.AttrValue(i=self._level)) + if self._children_inputs_mappings: + identity_op.op._set_attr( + OpHint.CHILDREN_INPUTS_MAPPINGS, + _attr_value_pb2.AttrValue( + s=_compat.as_bytes(_json.dumps( + self._children_inputs_mappings)))) + + if sort_index is not None: + identity_op.op._set_attr( + OpHint.FUNCTION_SORT_INDEX_ATTR, + _attr_value_pb2.AttrValue(i=sort_index)) + if aggregate is not None: + identity_op.op._set_attr( + OpHint.FUNCTION_AGGREGATE_ATTR, + 
_attr_value_pb2.AttrValue(s=_compat.as_bytes((aggregate)))) + # pylint: enable=protected-access + return identity_op + + def __init__(self, + function_name, + level=1, + children_inputs_mappings=None, + **kwargs): + """Create a OpHint. + + Args: + function_name: Name of the function (the custom op name in tflite) + level: OpHint level. + children_inputs_mappings: Children OpHint inputs/outputs mapping. + children_inputs_mappings should like below: + "parent_first_child_input": + [{"parent_input_index": num, "child_input_index": num}, ...] + "parent_last_child_output": + [{"parent_output_index": num, "child_output_index": num}, ...] + "internal_children_input_output": + [{"child_input_index": num, "child_output_index": num}, ...] + **kwargs: Keyword arguments of any constant attributes for the function. + """ + self._function_name = function_name + self._level = level + if self._level == 1: + assert children_inputs_mappings is None + else: + assert isinstance(children_inputs_mappings, dict) + self._children_inputs_mappings = children_inputs_mappings + if self._children_inputs_mappings is not None: + self._validate_children_inputs_mappings(self._children_inputs_mappings) + self._unique_function_id = _uuid.uuid1().hex + self._attrs_to_store_later = kwargs + self._stored_attrs = False + self._inputs = OpHint.OpHintArgumentTracker( + self._function_name, self._unique_function_id, "InputHint", + OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings) + self._outputs = OpHint.OpHintArgumentTracker( + self._function_name, self._unique_function_id, "OutputHint", + OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level, + self._children_inputs_mappings) + + def _validate_children_inputs_mappings(self, children_inputs_mappings): + """Validate children inputs mappings is in the right format. + + Args: + children_inputs_mappings: the Children ophint inputs/outputs mapping. 
+ """ + assert isinstance(children_inputs_mappings, dict) + assert "parent_first_child_input" in children_inputs_mappings + assert "parent_last_child_output" in children_inputs_mappings + assert "internal_children_input_output" in children_inputs_mappings + + # validate parent_first_child_input. + + def assert_dictlist_has_keys(dictlist, keys): + for dikt in dictlist: + assert isinstance(dikt, dict) + for key in keys: + assert key in dikt + + assert_dictlist_has_keys( + children_inputs_mappings["parent_first_child_input"], + ["parent_ophint_input_index", "first_child_ophint_input_index"]) + assert_dictlist_has_keys( + children_inputs_mappings["parent_last_child_output"], + ["parent_output_index", "child_output_index"]) + assert_dictlist_has_keys( + children_inputs_mappings["internal_children_input_output"], + ["child_input_index", "child_output_index"]) + + def _setattr(self, dest_op, name, value): + tensor_value = _ops.convert_to_tensor(value) + # pylint: disable=protected-access + dest_op.op._set_attr(name, _attr_value_pb2.AttrValue( + tensor=tensor_value.op.node_def.attr["value"].tensor)) + # pylint: enable=protected-access + + def add_input(self, *args, **kwargs): + """Add a wrapped input argument to the hint. + + Args: + *args: The input tensor. + **kwargs: + "name" label + "tag" a tag to group multiple arguments that will be aggregated. I.e. + a string like 'cool_input'. Basically multiple inputs can be added + to the same hint for parallel operations that will eventually be + combined. An example would be static_rnn which creates multiple copies + of state or inputs. + "aggregate" aggregation strategy that is valid only for tag non None. + Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, + and OpHint.AGGREGATE_STACK. + "index_override" The global index to use. This corresponds to the + argument order in the final stub that will be generated. + Returns: + The wrapped input tensor. 
+ """ + return self._inputs.add(*args, **kwargs) + + def add_output(self, *args, **kwargs): + """Add a wrapped output argument to the hint. + + Args: + *args: The output tensor. + **kwargs: + "name" label + "tag" a tag to group multiple arguments that will be aggregated. I.e. + a string like 'cool_input'. Basically multiple inputs can be added + to the same hint for parallel operations that will eventually be + combined. An example would be static_rnn which creates multiple copies + of state or inputs. + "aggregate" aggregation strategy that is valid only for tag non None. + Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, + and OpHint.AGGREGATE_STACK. + "index_override" The global index to use. This corresponds to the + argument order in the final stub that will be generated. + Returns: + The wrapped output tensor. + """ + return self._outputs.add(*args, **kwargs) + + def add_inputs(self, *args, **kwargs): + """Add a sequence of inputs to the function invocation. + + Args: + *args: List of inputs to be converted (should be Tf.Tensor). + **kwargs: This allows 'names' which should be a list of names. + + Returns: + Wrapped inputs (identity standins that have additional metadata). These + are also are also tf.Tensor's. + """ + if "names" in kwargs: + return [ + self._inputs.add(arg, name=name) + for arg, name in zip(args, kwargs["names"]) + ] + else: + return [self._inputs.add(arg) for arg in args] + + def add_outputs(self, *args, **kwargs): + """Add a sequence of outputs to the function invocation. + + Args: + *args: List of outputs to be converted (should be tf.Tensor). + **kwargs: See + + Returns: + Wrapped outputs (identity standins that have additional metadata). These + are also tf.Tensor's. 
+ """ + if "names" in kwargs: + return [ + self._outputs.add(arg, name=name) + for arg, name in zip(args, kwargs["names"]) + ] + else: + return [self._outputs.add(arg) for arg in args] + + +class _LiteOperand: + """Abstract operand for a tflite hint function._dynamic_rnn_loop. + + This is a base class that handles representing arguments to an OpHint. + It also is able to serialize operands to the stubbed graph_def. + Child classes are responsible for being able to + store information about the hint identity operators. They are also responsible + for knowing how to serialize to output graphdefs. + + Typically this will be implemented by holding one or more identity nodes + that were previously discovered as hints. + """ + + def aggregate_and_return_name_for_input(self, out_graphdef): + """This adds the node(s) to out_graphdef and returns the input node name. + + Args: + out_graphdef: A graphdef that is ready to have this input added. + + Returns: + The output that the stub should use as an input for this operand. + + Raises: + RuntimeError: if the method is not implemented. + """ + del out_graphdef + raise RuntimeError("Unimplemented abstract method.") + + def aggregate_and_return_name_for_output(self, fused_op_name, output_index, + out_graphdef): + """Add node(s) to graph representing output operands and returns type. + + Args: + fused_op_name: name of the fused op stub name. + output_index: Output index that we are currently processing from stub. + out_graphdef: The destination graphdef we are currently building up. + + Returns: + The datatype of this identity. + + Raises: + RuntimeError: if the method is not implemented. + """ + del fused_op_name, output_index, out_graphdef + raise RuntimeError("Unimplemented abstract method.") + + +class _LiteSingleOperand(_LiteOperand): + """A simple operand that is non-aggregated (i.e. 
most hints).""" + + def __init__(self, node): + _LiteOperand.__init__(self) + self.node = node + self.name = _tensor_name_base(node.name) + + def flatten(self): + return [self.name] + + def aggregate_and_return_name_for_input(self, out_graphdef): + return self.name + + def aggregate_and_return_name_for_output(self, fused_op_name, index, + out_graphdef): + output_node = _copy.deepcopy(self.node) + del output_node.input[:] + output_node.input.append(_tensorflow_output_name(fused_op_name, index)) + out_graphdef.node.extend([output_node]) + return self.node.attr["type"].i + + def __str__(self): + return str(self.name) + + +class _LiteAggregateOperand(_LiteOperand): + """An operand for a tflite hint function that is aggregated from many. + + For example, an LSTM is a grid of operators that are all related. Inputs + going into them may need to be fused, so they should all be tracked as + related arguments. + """ + + def __init__(self, aggregation): + _LiteOperand.__init__(self) + self.aggregation = aggregation + self.names = {} + self.nodes = {} + self.flattened = None + + def add(self, sort, node): + self.names[sort] = _tensor_name_base(node.name) + self.nodes[sort] = node + + def flatten_nodes(self): + """Return a list of all the node protos in aggregation sorted order.""" + if not self.flattened: + self.flattened = [None] * len(self.nodes) + for idx, node in self.nodes.items(): + self.flattened[idx] = node + for n in self.nodes: + if n is None: + raise RuntimeError("Aggregate was missing argument.") + if self.aggregation == OpHint.AGGREGATE_FIRST: + self.flattened = self.flattened[:1] + elif self.aggregation == OpHint.AGGREGATE_LAST: + self.flattened = self.flattened[-1:] + elif self.aggregation == OpHint.AGGREGATE_STACK: + pass + else: + raise ValueError("Invalid aggregation type %r specified" % + self.aggregation) + return self.flattened + + def flatten(self): + """Return a list of all node names in aggregation sorted sorter.""" + return [_tensor_name_base(x.name) 
for x in self.flatten_nodes()] + + def aggregate_and_return_name_for_input(self, out_graphdef): + """This adds the nodes to out_graphdef and returns an aggregated output. + + In particular, if you have 4 inputs to a hint stub, this will be the + node that you can use as an output. I.e. you have 4 timesteps from a + static rnn, then a fused UnidirectionalLSTM will expect 1 input with + all 4 time steps. So here we make a pack and return the output name of + that pack. + + Args: + out_graphdef: A graphdef that is ready to have this input added. + + Returns: + The name of a pack that aggregates this node. + """ + flattened = self.flatten_nodes() + if (self.aggregation == OpHint.AGGREGATE_FIRST) or ( + self.aggregation == OpHint.AGGREGATE_LAST): + assert len(flattened) == 1 + if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK: + return _tensor_name_base(flattened[0].name) + else: + new_node = _node_def_pb2.NodeDef() + new_node.op = "Pack" + new_node.name = "OpHintStack-%s" % flattened[0].name + new_node.attr["N"].i = len(flattened) + new_node.attr["T"].type = flattened[0].attr["T"].type + for discrete in flattened: + new_node.input.append(_tensor_name_base(discrete.name)) + out_graphdef.node.extend([new_node]) + return new_node.name + + def aggregate_and_return_name_for_output(self, fused_op_name, output_index, + out_graphdef): + """This adds to `out_graphdef` all the unaggregated outputs. + + I.e. we are outputting from a fused stub, but we need to make it compatible + with the unfused original graph so we insert an unpack. Ideally in a later + stage the unpack -> pack sequences will be removed. + + Args: + fused_op_name: The name of the stub we are in the process of fusing. + output_index: The output output_index this object represents. + out_graphdef: The graphdef we are in the process of buildings + + Returns: + The type of the aggregated output (so we can finish building the stub + op). 
+ """ + flattened = self.flatten_nodes() + if (self.aggregation == OpHint.AGGREGATE_FIRST) or ( + self.aggregation == OpHint.AGGREGATE_LAST): + assert len(flattened) == 1 + if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK: + temp_op = _LiteSingleOperand(flattened[0]) + return temp_op.aggregate_and_return_name_for_output( + fused_op_name, output_index, out_graphdef) + else: + stack_node = _node_def_pb2.NodeDef() + stack_node.op = "Unpack" + stack_node.name = "OpHintUnstack-%s" % flattened[0].name + stack_node.attr["num"].i = len(flattened) + output_type = flattened[0].attr["T"].type + stack_node.attr["T"].type = output_type + stack_node.input.append( + _tensorflow_output_name(fused_op_name, output_index)) + out_graphdef.node.extend([stack_node]) + + for idx, discrete in enumerate(flattened): + output_node = _copy.deepcopy(discrete) + del output_node.input[:] + output_node.input.append(_tensorflow_output_name(stack_node.name, idx)) + out_graphdef.node.extend([output_node]) + + return output_type + + def __str__(self): + s = "\t\t\tAGGREGATE %s\n" % self.aggregation + for sort, val in self.names.iteritems(): + s += "\t\t\t%d: %s\n" % (sort, val) + return s + + +class _LiteFuncCall: + """Represent a TensorFlow Lite custom function. + + This is uses to accumulate found hints in the graphdef into a single + conceptual unit. + + Attributes: + inputs: inputs to the op (hash from index # to argument) + outputs: outputs to the op (hash from index # to argument) + function_name: the tflite custom op name to use + uuid: a unique call id for this particular call (i.e. multiple function + calls would have the same function_name but different uuids. + params: A param name to key value for op constant data. I.e. for axis on a + reduction, strides on a convolution, etc. + level: Level of the OpHint. + children_inputs_mappings: If the Ophint has children, children inputs + mappings indicate how their inputs & outputs are mapped. 
+ """ + + def __init__(self): + self.inputs = {} + self.outputs = {} + self.function_name = None + self.uuid = None + self.params = {} + self.level = -1 + self.children_inputs_mappings = {} + + def flattened_inputs_and_outputs(self): + """Return a list of inputs and outputs in a flattened format. + + Returns: + Tuple of (inputs, outputs). where input and output i a list of names. + """ + + def _flatten(input_or_output_dict): + flattened_items = [] + for item in input_or_output_dict.values(): + flattened_items.extend(item.flatten()) + return flattened_items + + return _flatten(self.inputs), _flatten(self.outputs) + + def __str__(self): + + def format_args(items): + s = "" + for idx, item in items.iteritems(): + s += ("\t\t%d:\n" % idx) + str(item) + return s + + inputs_str = "\tInputs\n" + format_args(self.inputs) + outputs_str = "\tOutputs\n" + format_args(self.outputs) + + return ( + "tflite function %s call %s level %d " + "\n\tinputs:\n\t\t%s\n\toutputs:\n\t\t%s" % + (self.function_name, self.uuid, self.level, inputs_str, outputs_str)) + + +def _find_all_hints_in_nodes(nodes): + """Look at the all the input nodes and return a list of LiteFuncCall objs. + + Args: + nodes: A TensorFlow graph_def to look for LiteFuncCalls. 
+ + Returns: + a list of `LifeFuncCall` objects in the form + + """ + func_calls = _collections.defaultdict(_LiteFuncCall) + + for node in nodes: + attr = node.attr + # This is an op hint if it has a FUNCTION_UUID_ATTR, otherwise skip + if (OpHint.FUNCTION_UUID_ATTR not in attr or + not attr[OpHint.FUNCTION_UUID_ATTR].s): + continue + uuid = attr[OpHint.FUNCTION_UUID_ATTR].s + + # Start building function + call_def = func_calls[uuid] + call_def.uuid = uuid + call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s + call_def.level = attr[OpHint.FUNCTION_LEVEL_ATTR].i + # Get sorting and aggregation information + + sort = ( + attr[OpHint.FUNCTION_SORT_INDEX_ATTR].i + if OpHint.FUNCTION_SORT_INDEX_ATTR in attr else None) + if sort == -1: + sort = None + aggregation = None + if OpHint.FUNCTION_AGGREGATE_ATTR in attr: + aggregation = _compat.as_text(attr[OpHint.FUNCTION_AGGREGATE_ATTR].s) + + if OpHint.CHILDREN_INPUTS_MAPPINGS in attr: + call_def.children_inputs_mappings = _json.loads( + _compat.as_text(attr[OpHint.CHILDREN_INPUTS_MAPPINGS].s)) + + # Add the input or output + def put_operand(stuff, index, sort, operand, aggregation): + """Add a given index into the function structure.""" + if sort is None: + stuff[index] = _LiteSingleOperand(operand) + else: + if index not in stuff: + stuff[index] = _LiteAggregateOperand(aggregation) + stuff[index].add(sort, operand) + + if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr: + put_operand(call_def.inputs, attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i, + sort, node, aggregation) + if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr: + put_operand(call_def.outputs, attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i, + sort, node, aggregation) + + # Remember attributes + for a in attr: + if a.startswith("_tflite_attr_"): + call_def.params[a.replace("_tflite_attr_,", "")] = attr[a].tensor + + return func_calls + + +def _extract_topology_sequence_mapping(nodes): + return dict( + (_tensor_name_base(node.name), idx) for idx, node in enumerate(nodes)) + + 
def _find_children_hints_in_while_loop(function_def, nodes_mapping):
  """Find children hints and all nodes inside the while loop.

  Args:
    function_def: Function def of the while loop.
    nodes_mapping: While loop input_arg : real node name.

  Returns:
    Ordered children hints and all re-mapped nodes inside the while loop.
  """
  new_nodes = []

  # Make nodes inside function def inputs point to the real nodes.
  # NOTE(review): this rewrites `function_def.node_def` in place (the deepcopy
  # below happens after the mutation); presumably the caller does not reuse
  # the original function_def afterwards -- confirm before refactoring.
  for node in function_def.node_def:
    for i, _ in enumerate(node.input):
      if node.input[i] in nodes_mapping:
        node.input[i] = nodes_mapping[node.input[i]]
    new_nodes.append(_copy.deepcopy(node))
  name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)
  children_hints = _find_all_hints_in_nodes(new_nodes)
  children_hints_q = []
  # Ordered by the outputs: each hint is keyed by the smallest topological
  # sequence number among its output nodes, then sorted ascending.
  for hint in children_hints.values():
    _, output_names = hint.flattened_inputs_and_outputs()
    seq = name_to_seq_num[output_names[0]]
    for output_name in output_names:
      seq = min(seq, name_to_seq_num[output_name])
    children_hints_q.append((seq, hint))
  children_hints_q.sort(key=lambda tup: tup[0])
  ordered_children_hints = [x[1] for x in children_hints_q]
  return ordered_children_hints, new_nodes


def _find_children_hints(call, graph_def):
  """Find all children hints.

  For a given OpHint, we find all children hints inside it, we also copy all
  the nodes inside function defs (if applicable) to the original graph_def,
  they are returned in a list as well.

  Args:
    call: Parent OpHint that contains children ophints.
    graph_def: Original graph def.

  Returns:
    Ordered children hints inside the parent ophint; new graph def that
    contains nodes inside function defs (if applicable); nodes inside
    function defs.
  """
  name_to_input_name, _, _ = _extract_graph_summary(graph_def)
  input_names, output_names = call.flattened_inputs_and_outputs()

  reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
  reachable_by_output = _bfs_for_reachable_nodes(output_names,
                                                 name_to_input_name)
  output_nodes_set = set(output_names)
  children_hints = []
  out = _graph_pb2.GraphDef()
  out.library.CopyFrom(graph_def.library)
  out.versions.CopyFrom(graph_def.versions)
  function_def_nodes = set()
  for node in graph_def.node:
    out.node.extend([_copy.deepcopy(node)])
    n = _tensor_name_base(node.name)
    # Only nodes strictly interior to the hint (reachable from outputs but
    # neither an input-side node nor an output node) are inspected.
    if n in reachable_by_output:
      if n not in reachable_by_input and n not in output_nodes_set:
        # special handle for while loop function def.
        if node.op == "While" or node.op == "StatelessWhile":
          body_name = node.attr["body"].func.name
          inputs_outside_loop = node.input
          for function_def in graph_def.library.function:
            if function_def.signature.name == body_name:
              function_inputs = function_def.signature.input_arg
              assert len(inputs_outside_loop) == len(function_inputs)
              # Map the loop body's formal input args to the actual node
              # names feeding the While op.
              nodes_mapping = {}
              for i, function_input in enumerate(function_inputs):
                nodes_mapping[function_input.name] = inputs_outside_loop[i]
              (children_hints_in_loop,
               new_nodes) = _find_children_hints_in_while_loop(
                   function_def, nodes_mapping)
              # Record the loop-body node names so later passes know they are
              # not part of the outer dependency chain.
              function_def_nodes.update([x.name for x in new_nodes])
              children_hints.extend(children_hints_in_loop)
              out.node.extend(new_nodes)

  return children_hints, out, function_def_nodes


def _tensor_name_base(full_tensor_name):
  """Removes the device assignment code from a tensor.

  e.g. _tensor_name_base("foo:3") => "foo"

  Args:
    full_tensor_name: A tensor name that is annotated with a device placement
      (this is what tensor flow introspection gives).

  Returns:
    A name without any device assignment.
  """
  # A leading "^" marks a control dependency; strip it and keep the raw name.
  if full_tensor_name.startswith("^"):
    return full_tensor_name[1:]
  return full_tensor_name.split(":")[0]


def _tensorflow_output_name(tensor_name, output_index):
  # Output 0 is referenced by the bare node name; others need ":<index>".
  return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
                                                          output_index)


def _check_subgraph_closed(n, reachable_by_input, input_nodes_set,
                           name_to_input_name):
  """Checks to make sure node only connects to predecessor graph through inputs.

  Args:
    n: Node to check
    reachable_by_input: Nodes that are reachable by all inputs of subgraph
    input_nodes_set: The set of nodes that are "inputs".
    name_to_input_name: Maps from name to the list of inputs.

  Raises:
    TypeError: If the given node uses items past inputs directly.
  """
  # Depth-first walk backwards from n; any predecessor-side node reached
  # without passing through a declared input means the subgraph leaks.
  next_to_visit = [n]
  visited = set()
  while next_to_visit:
    current_node = next_to_visit.pop()
    visited.add(current_node)
    if (current_node in reachable_by_input and
        current_node not in input_nodes_set):
      raise TypeError("Node %s uses input %s not in input_nodes." %
                      (n, current_node))
    if current_node not in input_nodes_set:
      next_to_visit += [
          input_node for input_node in name_to_input_name[current_node]
          if input_node not in visited
      ]


def _convert_single_op_hint_to_stub(call,
                                    graph_def,
                                    function_def_nodes=None,
                                    is_last_run=True):
  """Given a graph_def, converts `call` into a stub and returns a new graph_def.

  Args:
    call: A single function call to be converted.
    graph_def: A graph_def to use as input (that has call obviously).
    function_def_nodes: Nodes inside the function def those are not connected
      to the graph.
    is_last_run: Whether it is the last run for a given pass (for OpHint has
      children).

  Returns:
    A new transformed graph-def that has call as a stub (single op).

  Note: after this process, the graph_def can no longer be loaded into
  the tensorflow runtime, so all future manipulations are done in graph_def
  level.
+ """ + if function_def_nodes is None: + function_def_nodes = set() + name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary( + graph_def) + input_names, output_names = call.flattened_inputs_and_outputs() + + reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name) + reachable_by_output = _bfs_for_reachable_nodes(output_names, + name_to_input_name) + output_nodes_set = set(output_names) + nodes_after_fuse = [] + nodes_deleted_by_fuse = set() + # Classify each node. We want to keep everything reachable by input, but + # we don't know if things that are not reachable by output or input (things + # after fusing). + for node in graph_def.node: + n = _tensor_name_base(node.name) + if n in reachable_by_output: + if n not in reachable_by_input and n not in output_nodes_set: + nodes_deleted_by_fuse.add(n) + elif n not in reachable_by_input and n not in function_def_nodes: + # n is a node that after all the fusings, so keep it. + nodes_after_fuse.append(n) + else: + # In the last run, n is a node that is randomly in the graph but not + # connected to the chain of dependencies, we will delete n, otherwise + # we keep them. + if not is_last_run: + nodes_after_fuse.append(n) + + # Make a new graphdef with all the pre-input and input nodes + out = _graph_pb2.GraphDef() + reachable_by_input_sorted = sorted( + list(reachable_by_input), key=lambda n: name_to_seq_num[n]) + for node in reachable_by_input_sorted: + out.node.extend([_copy.deepcopy(name_to_node[node])]) + + # Create any stacks to aggregate arguments into to a single input + # i.e. for static_rnn's. + sorted_input_indices = list(call.inputs.keys()) + sorted_input_indices.sort() + sorted_output_indices = list(call.outputs.keys()) + sorted_output_indices.sort() + new_node = _node_def_pb2.NodeDef() + # Delegate to each operand to produce the proper new input for this stub node. + # In particular, an aggregate input will now be a Pack of some previously + # non-fused things. 
+ + optional_input_node = _node_def_pb2.NodeDef() + optional_input_node.name = "Const" + str(_uuid.uuid1().hex) + optional_input_node.op = "Const" + optional_input_node.attr["dtype"].CopyFrom( + _attr_value_pb2.AttrValue(type=_dtypes.float32.as_datatype_enum)) + optional_input_node.attr["value"].CopyFrom( + _attr_value_pb2.AttrValue( + tensor=_tensor_util.make_tensor_proto([-1], _dtypes.float32, [1]))) + out.node.extend([optional_input_node]) + + max_index = max(sorted_input_indices) + 1 + for cur_index in range(max_index): + if cur_index in sorted_input_indices: + inputs = call.inputs[cur_index] + input_name = inputs.aggregate_and_return_name_for_input(out) + new_node.input.append(input_name) + else: + new_node.input.append(optional_input_node.name) + + new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices) + + # Create the function + new_node.op = call.function_name + new_node.name = call.uuid + out.node.extend([new_node]) + + # Now call each output argument to give them a chance to make the proper + # output type and add it to our new_node. + output_dtypes = [] + max_output_index = max(sorted_output_indices) + 1 + for cur_index in range(max_output_index): + if cur_index in sorted_output_indices: + output = call.outputs[cur_index] + output_dtype = ( + output.aggregate_and_return_name_for_output(new_node.name, cur_index, + out)) + else: + output_dtype = optional_input_node.attr["type"].i + output_dtypes.append(output_dtype) + new_node.attr["_output_types"].list.type[:] = output_dtypes + new_node.attr["_output_quantized"].b = False + + # Add post output nodes that do not depend on the outputs + for n in nodes_after_fuse: + should_keep = True + for input_name in name_to_input_name[n]: + if input_name in nodes_deleted_by_fuse: + should_keep = False + if should_keep: + out.node.extend([_copy.deepcopy(name_to_node[n])]) + + # Misc. graph_def data that needs copying. 
+ out.library.CopyFrom(graph_def.library) + out.versions.CopyFrom(graph_def.versions) + + return out + + +def _remove_one_redundant_stack_unstack(in_graph_def): + """Removes a stack->unstack pattern from in_graph_def in a returned graph. + + Args: + in_graph_def: Graph def to use as input. + + Returns: + Simplified tuple (graph_def, changed_something) where changed_something + is true if anything was done. + """ + name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary( + in_graph_def) + del name_to_seq_num + + do_generic_pack_unpack = True + + out = _graph_pb2.GraphDef() + out.library.CopyFrom(in_graph_def.library) + out.versions.CopyFrom(in_graph_def.versions) + for n in in_graph_def.node: + node_name = _tensor_name_base(n.name) + if not node_name.startswith("OpHintStack") and not n.op.startswith("Pack"): + continue + next_to_visit = [node_name] + visited = set() + + unpack_nodes = set() + pack_node = node_name + + # Find a pattern of unstack connected to a stack (with identities + # in between. 
+ matches_pattern = True + is_hint_created_stack = False + while next_to_visit: + current_node_name = next_to_visit[0] + visited.add(current_node_name) + del next_to_visit[0] + node = name_to_node[current_node_name] + is_op_hint_stack = node.name.startswith("OpHintStack") + is_op_hint_unstack = node.name.startswith("OpHintUnstack") + if (node.op == "Identity" or is_op_hint_stack or + (do_generic_pack_unpack and node.op == "Pack")): + is_hint_created_stack |= is_op_hint_stack + next_to_visit += [ + input_node for input_node in name_to_input_name[current_node_name] + if input_node not in visited + ] + elif (is_op_hint_unstack or + (do_generic_pack_unpack and node.op == "Unpack")): + unpack_nodes.add(node.name) + is_hint_created_stack &= is_op_hint_unstack + else: + matches_pattern = False + break + visited.add(node.name) + + if matches_pattern and len(unpack_nodes) == 1: + pack_node = node_name + + # Check to see if anyone depends on the intermediate identity or the + # Unstacked form + no_external_dependency = True + for other_n in in_graph_def.node: + if other_n.name in visited: + continue + for input_tensor in name_to_input_name[other_n.name]: + input_op = _tensor_name_base(input_tensor) + if input_op in visited and input_op != pack_node: + no_external_dependency = False + # Proceed with the substitution if the stack/unstack pair was created + # through hints, or that it was not, but nobody is consuming things + # between the stack and unstack. 
+ if is_hint_created_stack or no_external_dependency: + end = unpack_nodes.pop() + end_input = name_to_node[end].input[0] + # All nodes that depend on the final stack need to be redone to use + for other_n in in_graph_def.node: + node_name = _tensor_name_base(other_n.name) + if node_name not in visited: + new_node = _copy.deepcopy(other_n) + new_node.input[:] = [ + (end_input if stripped == pack_node else non_stripped) + for stripped, non_stripped in zip(name_to_input_name[node_name], + new_node.input[:]) + ] + out.node.extend([new_node]) + return out, True + return in_graph_def, False + + +def _remove_redundant_stack_unstack(graph_def): + curr = graph_def + del graph_def + changed_stuff = True + while changed_stuff: + curr, changed_stuff = _remove_one_redundant_stack_unstack(curr) + return curr + + +def _get_correct_mapping(original_index, nodes): + # Special handle for the index is -1 case. + # If it is -1, return the last index. + if original_index == -1: + node_indices = nodes.keys() + node_indices = sorted(node_indices) + return node_indices[-1] + return original_index + + +def _convert_op_hints_to_stubs_helper( + graph_def, write_callback=lambda sess, graph_def: None): + """Converts a graph_def to a new graph_def where all op hints are stubbed. + + Args: + graph_def: A graph def that we should convert. + write_callback: A function pointer that can be used to write intermediate + steps of graph transformation (optional). + + Returns: + A new stubbed graph_def. 
+ """ + hints = _find_all_hints_in_nodes(graph_def.node) + + hints_q = [] + for hint in hints.values(): + hints_q.append((hint.level, hint.uuid)) + + hints_q.sort(key=lambda tup: tup[0]) + for i in range(len(hints_q) - 1, -1, -1): + level, hint_uuid = hints_q[i] + + curr_graph_def = graph_def + del graph_def # prevent using graph_def again (common source of error) + for i in range(len(hints_q) - 1, -1, -1): + level, hint_uuid = hints_q[i] + if level >= 2: + children_hints, curr_graph_def, function_def_nodes = _find_children_hints( + hints[hint_uuid], curr_graph_def) + # pylint: disable=superfluous-parens + assert (len(children_hints) > 0) # pylint: disable=g-explicit-length-test + # pylint: enable=superfluous-parens + + # Re-wire the children hints inputs/outputs, so latter child's inputs + # connect to previous child node's outputs. + children_inputs_mappings = hints[hint_uuid].children_inputs_mappings + for j, child_hint in enumerate(children_hints): + if j == 0: + for mapping in children_inputs_mappings["parent_first_child_input"]: + parent_input_index = _get_correct_mapping( + mapping["parent_ophint_input_index"], hints[hint_uuid].inputs) + child_input_index = _get_correct_mapping( + mapping["first_child_ophint_input_index"], child_hint.inputs) + child_hint.inputs[child_input_index] = hints[hint_uuid].inputs[ + parent_input_index] + else: + for mapping in children_inputs_mappings[ + "internal_children_input_output"]: + input_index = _get_correct_mapping(mapping["child_input_index"], + child_hint.inputs) + output_index = _get_correct_mapping(mapping["child_output_index"], + children_hints[j - 1].outputs) + child_hint.inputs[input_index] = children_hints[ + j - 1].outputs[output_index] + if j == len(children_hints) - 1: + for mapping in children_inputs_mappings["parent_last_child_output"]: + parent_output_index = _get_correct_mapping( + mapping["parent_output_index"], hints[hint_uuid].outputs) + child_output_index = _get_correct_mapping( + 
mapping["child_output_index"], child_hint.outputs) + child_hint.outputs[child_output_index] = hints[hint_uuid].outputs[ + parent_output_index] + + for j, child_hint in enumerate(children_hints): + curr_graph_def = _convert_single_op_hint_to_stub( + child_hint, curr_graph_def, function_def_nodes, + j == len(children_hints) - 1) + else: + curr_graph_def = _convert_single_op_hint_to_stub(hints[hint_uuid], + curr_graph_def) + write_callback(curr_graph_def, "initial") + # The stubbing process can create stacks/unstacks in the case of LSTMs + # remove them. + curr_graph_def = _remove_redundant_stack_unstack(curr_graph_def) + return curr_graph_def + + +def find_all_hinted_output_nodes(session=None, graph_def=None): + """Find all Ophints output nodes in the graph. + + This is used to get all the output nodes those are ophinted, it is important + for operation like convert_variables_to_constants keep all ophints structure. + Note: only one of session or graph_def should be used, not both. + Why this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can + generate multiple outputs for unfused subgraph. If not all output nodes are + consumed, graph optimization can potentially drop the unused nodes and cause + ophints in an invalid states (due to missing ophinted output nodes). So it's + important for us to find all those hinted output nodes and make sure they're + not discarded away. + + Args: + session: A TensorFlow session that contains the graph to convert. + graph_def: A graph def that we should convert. + + Returns: + A list of OpHints output nodes. + Raises: + ValueError: If both session and graph_def are provided. 
+ """ + if session is not None and graph_def is not None: + raise ValueError("Provide only one of session and graph_def.") + hinted_outputs_nodes = [] + if session is not None: + hints = _find_all_hints_in_nodes(session.graph_def.node) + elif graph_def is not None: + hints = _find_all_hints_in_nodes(graph_def.node) + for hint in hints.values(): + _, output_nodes = hint.flattened_inputs_and_outputs() + hinted_outputs_nodes.extend(output_nodes) + return hinted_outputs_nodes + + +def is_ophint_converted(graph_def): + if graph_def is None: + raise ValueError("Must provide the graph_def.") + ophint_converted = False + for node in graph_def.node: + attr = node.attr + if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr: + ophint_converted = True + break + return ophint_converted + + +@_tf_export(v1=["lite.experimental.convert_op_hints_to_stubs"]) +@_deprecation.deprecated( + None, + "Please follow instructions under " + "https://www.tensorflow.org/lite/convert/operation_fusion for operation" + "fusion in tflite." +) +def convert_op_hints_to_stubs(session=None, + graph_def=None, + write_callback=lambda graph_def, comments: None): + """Converts a graphdef with LiteOp hints into stub operations. + + This is used to prepare for toco conversion of complex intrinsic usages. + Note: only one of session or graph_def should be used, not both. + + Args: + session: A TensorFlow session that contains the graph to convert. + graph_def: A graph def that we should convert. + write_callback: A function pointer that can be used to write intermediate + steps of graph transformation (optional). + + Returns: + A new graphdef with all ops contained in OpHints being replaced by + a single op call with the right parameters. + Raises: + ValueError: If both session and graph_def are provided. 
+ """ + + if session is not None and graph_def is not None: + raise ValueError("Provide only one of session and graph_def.") + + if session is not None: + return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback) + elif graph_def is not None: + return _convert_op_hints_to_stubs_helper(graph_def, write_callback) + else: + raise ValueError("Must specify session or graph_def as input.") + + +_allowed_symbols = [ + "OpHint", + "convert_op_hints_to_stubs", + "convert_op_hints_to_stubs_new", + "find_all_hinted_output_nodes", + "is_ophint_converted", +] +remove_undocumented(__name__, _allowed_symbols) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6b71fc1403b605ed646657a4e97d99da561e1e4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/calibrator.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/calibrator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..517cef23d87ba21679754aa8b1f2ff285650d443 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/__pycache__/calibrator.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b020337da48ed95f2eea3c956e82abbf30f51087 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.pyi @@ -0,0 +1,40 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Callable + +from typing import overload + +class CalibrationWrapper: + def __init__(self, arg0: object, arg1: list[str], arg2: list[Callable[[int],None]]) -> None: ... + def Calibrate(self) -> object: ... + @overload + def FeedTensor(self, arg0: object, arg1: str) -> object: ... + @overload + def FeedTensor(self, arg0: object) -> object: ... + @overload + def Prepare(self, arg0: object, arg1: str) -> object: ... + @overload + def Prepare(self, arg0: object) -> object: ... + @overload + def Prepare(self, arg0: str) -> object: ... 
+ @overload + def Prepare(self) -> object: ... + @overload + def QuantizeModel(self, arg0: int, arg1: int, arg2: bool, arg3: int, arg4: int, arg5: bool) -> object: ... + @overload + def QuantizeModel(self, arg0: int, arg1: int, arg2: bool, arg3: str) -> object: ... + +def AddIntermediateTensors(arg0: object) -> object: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.so new file mode 100644 index 0000000000000000000000000000000000000000..7789d64931bc45824676da39603ed484ec3ce4da --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/_pywrap_tensorflow_lite_calibration_wrapper.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6ae4ca7008821683f6d7f4d0e18b925ce410148e23db445d3941c952a7599ab +size 6276584 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/calibrator.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/calibrator.py new file mode 100644 index 0000000000000000000000000000000000000000..136890589a09fcb6c9a38d7cce232df7df864700 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/optimize/calibrator.py @@ -0,0 +1,255 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for post training quantization with calibration."""
import numpy as np

from tensorflow.lite.python.convert_phase import Component
from tensorflow.lite.python.convert_phase import convert_phase
from tensorflow.lite.python.convert_phase import SubComponent
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.framework import dtypes
from tensorflow.python.util.lazy_loader import LazyLoader

# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
_calibration_wrapper = LazyLoader(
    "_calibration_wrapper",
    globals(),
    (
        "tensorflow.lite.python.optimize."
        "_pywrap_tensorflow_lite_calibration_wrapper"
    ),
)


def add_intermediate_tensors(model_content):
  """Adds intermediate tensors to fused op if needed."""
  return _calibration_wrapper.AddIntermediateTensors(model_content)


class Calibrator:
  """Calibrates a floating point model and then quantizes it.

  This is an internal class, not a public interface.
  """

  def __init__(
      self,
      model_content,
      custom_op_registerers_by_name=None,
      custom_op_registerers_by_func=None,
  ):
    """Constructor.

    Args:
      model_content: Content of a TF-Lite Flatbuffer file.
      custom_op_registerers_by_name: List of str (symbol names) that take a
        pointer to a MutableOpResolver and register custom ops.
      custom_op_registerers_by_func: List of functions that take a pointer to a
        MutableOpResolver and register custom ops.

    Raises:
      ValueError: If the calibrator was unable to open the model.
    """
    if not model_content:
      raise ValueError("`model_content` must be specified.")
    if custom_op_registerers_by_name is None:
      custom_op_registerers_by_name = []
    if custom_op_registerers_by_func is None:
      custom_op_registerers_by_func = []
    try:
      # The C++ wrapper owns the parsed model; keep model_content around so a
      # lazily-built Interpreter can reuse it for signature lookups.
      self._calibrator = _calibration_wrapper.CalibrationWrapper(
          model_content,
          custom_op_registerers_by_name,
          custom_op_registerers_by_func,
      )
      self._model_content = model_content
    except Exception as e:
      # NOTE(review): consider `raise ValueError(...) from e` to preserve the
      # original traceback chain.
      raise ValueError("Failed to parse the model: %s." % e)
    if not self._calibrator:
      raise ValueError("Failed to parse the model.")
    # Interpreter is created on demand in _feed_tensors for dict/tuple input.
    self._interpreter = None

  def _create_input_array_from_dict(self, signature_key, inputs):
    # Order the name-keyed inputs by each tensor's interpreter input index so
    # the wrapper receives them positionally.
    input_array = []
    signature_runner = self._interpreter.get_signature_runner(signature_key)
    input_details = sorted(
        signature_runner.get_input_details().items(),
        key=lambda item: item[1]["index"],
    )
    for input_name, _ in input_details:
      input_array.append(inputs[input_name])
    return input_array

  def _feed_tensors(self, dataset_gen, resize_input):
    """Feed tensors to the calibrator."""
    # Tracks which signature keys have had Prepare() called (None key is the
    # plain positional-list case).
    initialized = {}

    for sample in dataset_gen():
      if isinstance(sample, tuple):
        # (signature_key, {input_name: value}) form.
        if not isinstance(sample[1], dict):
          raise ValueError(
              "You need to provide either a dictionary with input "
              "names and values in the second argument in the "
              "tuple"
          )
        # Convert signature based inputs to the tensor index based data.
        if self._interpreter is None:
          self._interpreter = Interpreter(model_content=self._model_content)
        signature_key = sample[0]
        input_array = self._create_input_array_from_dict(
            signature_key, sample[1]
        )
      elif isinstance(sample, dict):
        # Convert signature based inputs to the tensor index based data.
        if self._interpreter is None:
          self._interpreter = Interpreter(model_content=self._model_content)
        signature_key = None
        input_array = self._create_input_array_from_dict(None, sample)
      elif isinstance(sample, list):
        # Already positional; no interpreter needed.
        signature_key = None
        input_array = sample
      else:
        raise ValueError(
            "You need to provide either a dictionary with input "
            "names and values, a tuple with signature key and a "
            "dictionary with input names and values, or an array "
            "with input values in the order of input tensors of "
            "the graph in the representative_dataset function. "
            "Unsupported value from dataset: {}.".format(sample)
        )

      if signature_key not in initialized:
        initialized[signature_key] = True
        # First sample for this signature: Prepare, optionally resizing the
        # model inputs to this sample's shapes.
        if resize_input:
          if signature_key is not None:
            self._calibrator.Prepare(
                [list(s.shape) for s in input_array], signature_key
            )
          else:
            self._calibrator.Prepare([list(s.shape) for s in input_array])
        else:
          if signature_key is not None:
            self._calibrator.Prepare(signature_key)
          else:
            self._calibrator.Prepare()
      if signature_key is not None:
        self._calibrator.FeedTensor(input_array, signature_key)
      else:
        self._calibrator.FeedTensor(input_array)

  @convert_phase(
      Component.OPTIMIZE_TFLITE_MODEL,
      SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER,
  )
  def calibrate_and_quantize(
      self,
      dataset_gen,
      input_type,
      output_type,
      allow_float,
      activations_type=dtypes.int8,
      bias_type=dtypes.int32,
      resize_input=True,
      disable_per_channel=False,
  ):
    """Calibrates the model with specified generator and then quantizes it.

    The input shapes of the calibrator are resized with the calibration data if
    `resize_input` is set.

    Returns:
      A quantized model.

    Args:
      dataset_gen: A generator that generates calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean.
False if the resulting model cannot perform float + computation, useful when targeting an integer-only backend. If False, an + error will be thrown if an operation cannot be quantized, otherwise the + model will fallback to float ops. + activations_type: A tf.dtype representing the desired type for + activations. + bias_type: A tf.dtype representing the desired type for bias. + resize_input: A boolean. True if the shape of the sample data is different + from the input. + disable_per_channel: A boolean. True if disabling per-channel + quantization. + """ + self._feed_tensors(dataset_gen, resize_input) + return self._calibrator.QuantizeModel( + np.dtype(input_type.as_numpy_dtype()).num, + np.dtype(output_type.as_numpy_dtype()).num, + allow_float, + np.dtype(activations_type.as_numpy_dtype()).num, + np.dtype(bias_type.as_numpy_dtype()).num, + disable_per_channel, + ) + + @convert_phase( + Component.OPTIMIZE_TFLITE_MODEL, + SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER, + ) + def calibrate_and_quantize_single( + self, + dataset_gen, + input_type, + output_type, + allow_float, + op_output_name, + resize_input=True, + ): + """Calibrates the model with specified generator and then quantizes it. + + Only the single op with output op_output_name will be quantized. + The input shapes of the calibrator are resized with the calibration data. + + Returns: + A quantized model. + + Args: + dataset_gen: A generator that generates calibration samples. + input_type: A tf.dtype representing the desired real-value input type. + output_type: A tf.dtype representing the desired real-value output type. + allow_float: A boolean. False if the resulting model cannot perform float + computation, useful when targeting an integer-only backend. If False, an + error will be thrown if an operation cannot be quantized, otherwise the + model will fallback to float ops. + op_output_name: A string, only this op will be quantized. + resize_input: A boolean. 
True if the shape of the sample data is different + from the input. + """ + self._feed_tensors(dataset_gen, resize_input) + return self._calibrator.QuantizeModel( + np.dtype(input_type.as_numpy_dtype()).num, + np.dtype(output_type.as_numpy_dtype()).num, + allow_float, + op_output_name, + ) + + @convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE) + def calibrate(self, dataset_gen): + """Calibrates the model with specified generator. + + Returns: + A model with min and max calibration stats. + + Args: + dataset_gen: A generator that generates calibration samples. + """ + self._feed_tensors(dataset_gen, resize_input=True) + return self._calibrator.Calibrate() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_py_generated.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_py_generated.py new file mode 100644 index 0000000000000000000000000000000000000000..b18edcf0d8fd9d3f4c961f52efe1a543f4bb5787 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_py_generated.py @@ -0,0 +1,18438 @@ +import flatbuffers + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +from flatbuffers.compat import import_numpy +np = import_numpy() + +class TensorType(object): + FLOAT32 = 0 + FLOAT16 = 1 + INT32 = 2 + UINT8 = 3 + INT64 = 4 + STRING = 5 + BOOL = 6 + INT16 = 7 + COMPLEX64 = 8 + INT8 = 9 + FLOAT64 = 10 + COMPLEX128 = 11 + UINT64 = 12 + RESOURCE = 13 + VARIANT = 14 + UINT32 = 15 + UINT16 = 16 + INT4 = 17 + BFLOAT16 = 18 + + +class QuantizationDetails(object): + NONE = 0 + CustomQuantization = 1 + +def QuantizationDetailsCreator(unionType, table): + from flatbuffers.table import Table + if not isinstance(table, Table): + return None + if unionType == QuantizationDetails().CustomQuantization: + return CustomQuantizationT.InitFromBuf(table.Bytes, table.Pos) + return None + + 
+class DimensionType(object): + DENSE = 0 + SPARSE_CSR = 1 + + +class SparseIndexVector(object): + NONE = 0 + Int32Vector = 1 + Uint16Vector = 2 + Uint8Vector = 3 + +def SparseIndexVectorCreator(unionType, table): + from flatbuffers.table import Table + if not isinstance(table, Table): + return None + if unionType == SparseIndexVector().Int32Vector: + return Int32VectorT.InitFromBuf(table.Bytes, table.Pos) + if unionType == SparseIndexVector().Uint16Vector: + return Uint16VectorT.InitFromBuf(table.Bytes, table.Pos) + if unionType == SparseIndexVector().Uint8Vector: + return Uint8VectorT.InitFromBuf(table.Bytes, table.Pos) + return None + + +class BuiltinOperator(object): + ADD = 0 + AVERAGE_POOL_2D = 1 + CONCATENATION = 2 + CONV_2D = 3 + DEPTHWISE_CONV_2D = 4 + DEPTH_TO_SPACE = 5 + DEQUANTIZE = 6 + EMBEDDING_LOOKUP = 7 + FLOOR = 8 + FULLY_CONNECTED = 9 + HASHTABLE_LOOKUP = 10 + L2_NORMALIZATION = 11 + L2_POOL_2D = 12 + LOCAL_RESPONSE_NORMALIZATION = 13 + LOGISTIC = 14 + LSH_PROJECTION = 15 + LSTM = 16 + MAX_POOL_2D = 17 + MUL = 18 + RELU = 19 + RELU_N1_TO_1 = 20 + RELU6 = 21 + RESHAPE = 22 + RESIZE_BILINEAR = 23 + RNN = 24 + SOFTMAX = 25 + SPACE_TO_DEPTH = 26 + SVDF = 27 + TANH = 28 + CONCAT_EMBEDDINGS = 29 + SKIP_GRAM = 30 + CALL = 31 + CUSTOM = 32 + EMBEDDING_LOOKUP_SPARSE = 33 + PAD = 34 + UNIDIRECTIONAL_SEQUENCE_RNN = 35 + GATHER = 36 + BATCH_TO_SPACE_ND = 37 + SPACE_TO_BATCH_ND = 38 + TRANSPOSE = 39 + MEAN = 40 + SUB = 41 + DIV = 42 + SQUEEZE = 43 + UNIDIRECTIONAL_SEQUENCE_LSTM = 44 + STRIDED_SLICE = 45 + BIDIRECTIONAL_SEQUENCE_RNN = 46 + EXP = 47 + TOPK_V2 = 48 + SPLIT = 49 + LOG_SOFTMAX = 50 + DELEGATE = 51 + BIDIRECTIONAL_SEQUENCE_LSTM = 52 + CAST = 53 + PRELU = 54 + MAXIMUM = 55 + ARG_MAX = 56 + MINIMUM = 57 + LESS = 58 + NEG = 59 + PADV2 = 60 + GREATER = 61 + GREATER_EQUAL = 62 + LESS_EQUAL = 63 + SELECT = 64 + SLICE = 65 + SIN = 66 + TRANSPOSE_CONV = 67 + SPARSE_TO_DENSE = 68 + TILE = 69 + EXPAND_DIMS = 70 + EQUAL = 71 + NOT_EQUAL = 72 + LOG = 73 + SUM = 
74 + SQRT = 75 + RSQRT = 76 + SHAPE = 77 + POW = 78 + ARG_MIN = 79 + FAKE_QUANT = 80 + REDUCE_PROD = 81 + REDUCE_MAX = 82 + PACK = 83 + LOGICAL_OR = 84 + ONE_HOT = 85 + LOGICAL_AND = 86 + LOGICAL_NOT = 87 + UNPACK = 88 + REDUCE_MIN = 89 + FLOOR_DIV = 90 + REDUCE_ANY = 91 + SQUARE = 92 + ZEROS_LIKE = 93 + FILL = 94 + FLOOR_MOD = 95 + RANGE = 96 + RESIZE_NEAREST_NEIGHBOR = 97 + LEAKY_RELU = 98 + SQUARED_DIFFERENCE = 99 + MIRROR_PAD = 100 + ABS = 101 + SPLIT_V = 102 + UNIQUE = 103 + CEIL = 104 + REVERSE_V2 = 105 + ADD_N = 106 + GATHER_ND = 107 + COS = 108 + WHERE = 109 + RANK = 110 + ELU = 111 + REVERSE_SEQUENCE = 112 + MATRIX_DIAG = 113 + QUANTIZE = 114 + MATRIX_SET_DIAG = 115 + ROUND = 116 + HARD_SWISH = 117 + IF = 118 + WHILE = 119 + NON_MAX_SUPPRESSION_V4 = 120 + NON_MAX_SUPPRESSION_V5 = 121 + SCATTER_ND = 122 + SELECT_V2 = 123 + DENSIFY = 124 + SEGMENT_SUM = 125 + BATCH_MATMUL = 126 + PLACEHOLDER_FOR_GREATER_OP_CODES = 127 + CUMSUM = 128 + CALL_ONCE = 129 + BROADCAST_TO = 130 + RFFT2D = 131 + CONV_3D = 132 + IMAG = 133 + REAL = 134 + COMPLEX_ABS = 135 + HASHTABLE = 136 + HASHTABLE_FIND = 137 + HASHTABLE_IMPORT = 138 + HASHTABLE_SIZE = 139 + REDUCE_ALL = 140 + CONV_3D_TRANSPOSE = 141 + VAR_HANDLE = 142 + READ_VARIABLE = 143 + ASSIGN_VARIABLE = 144 + BROADCAST_ARGS = 145 + RANDOM_STANDARD_NORMAL = 146 + BUCKETIZE = 147 + RANDOM_UNIFORM = 148 + MULTINOMIAL = 149 + GELU = 150 + DYNAMIC_UPDATE_SLICE = 151 + RELU_0_TO_1 = 152 + UNSORTED_SEGMENT_PROD = 153 + UNSORTED_SEGMENT_MAX = 154 + UNSORTED_SEGMENT_SUM = 155 + ATAN2 = 156 + UNSORTED_SEGMENT_MIN = 157 + SIGN = 158 + BITCAST = 159 + BITWISE_XOR = 160 + RIGHT_SHIFT = 161 + STABLEHLO_LOGISTIC = 162 + STABLEHLO_ADD = 163 + STABLEHLO_DIVIDE = 164 + STABLEHLO_MULTIPLY = 165 + STABLEHLO_MAXIMUM = 166 + STABLEHLO_RESHAPE = 167 + STABLEHLO_CLAMP = 168 + STABLEHLO_CONCATENATE = 169 + STABLEHLO_BROADCAST_IN_DIM = 170 + STABLEHLO_CONVOLUTION = 171 + STABLEHLO_SLICE = 172 + STABLEHLO_CUSTOM_CALL = 173 + STABLEHLO_REDUCE = 174 + 
STABLEHLO_ABS = 175 + STABLEHLO_AND = 176 + STABLEHLO_COSINE = 177 + STABLEHLO_EXPONENTIAL = 178 + STABLEHLO_FLOOR = 179 + STABLEHLO_LOG = 180 + STABLEHLO_MINIMUM = 181 + STABLEHLO_NEGATE = 182 + STABLEHLO_OR = 183 + STABLEHLO_POWER = 184 + STABLEHLO_REMAINDER = 185 + STABLEHLO_RSQRT = 186 + STABLEHLO_SELECT = 187 + STABLEHLO_SUBTRACT = 188 + STABLEHLO_TANH = 189 + STABLEHLO_SCATTER = 190 + STABLEHLO_COMPARE = 191 + STABLEHLO_CONVERT = 192 + STABLEHLO_DYNAMIC_SLICE = 193 + STABLEHLO_DYNAMIC_UPDATE_SLICE = 194 + STABLEHLO_PAD = 195 + STABLEHLO_IOTA = 196 + STABLEHLO_DOT_GENERAL = 197 + STABLEHLO_REDUCE_WINDOW = 198 + STABLEHLO_SORT = 199 + STABLEHLO_WHILE = 200 + STABLEHLO_GATHER = 201 + STABLEHLO_TRANSPOSE = 202 + DILATE = 203 + STABLEHLO_RNG_BIT_GENERATOR = 204 + REDUCE_WINDOW = 205 + STABLEHLO_COMPOSITE = 206 + STABLEHLO_SHIFT_LEFT = 207 + STABLEHLO_CBRT = 208 + + +class BuiltinOptions(object): + NONE = 0 + Conv2DOptions = 1 + DepthwiseConv2DOptions = 2 + ConcatEmbeddingsOptions = 3 + LSHProjectionOptions = 4 + Pool2DOptions = 5 + SVDFOptions = 6 + RNNOptions = 7 + FullyConnectedOptions = 8 + SoftmaxOptions = 9 + ConcatenationOptions = 10 + AddOptions = 11 + L2NormOptions = 12 + LocalResponseNormalizationOptions = 13 + LSTMOptions = 14 + ResizeBilinearOptions = 15 + CallOptions = 16 + ReshapeOptions = 17 + SkipGramOptions = 18 + SpaceToDepthOptions = 19 + EmbeddingLookupSparseOptions = 20 + MulOptions = 21 + PadOptions = 22 + GatherOptions = 23 + BatchToSpaceNDOptions = 24 + SpaceToBatchNDOptions = 25 + TransposeOptions = 26 + ReducerOptions = 27 + SubOptions = 28 + DivOptions = 29 + SqueezeOptions = 30 + SequenceRNNOptions = 31 + StridedSliceOptions = 32 + ExpOptions = 33 + TopKV2Options = 34 + SplitOptions = 35 + LogSoftmaxOptions = 36 + CastOptions = 37 + DequantizeOptions = 38 + MaximumMinimumOptions = 39 + ArgMaxOptions = 40 + LessOptions = 41 + NegOptions = 42 + PadV2Options = 43 + GreaterOptions = 44 + GreaterEqualOptions = 45 + LessEqualOptions = 46 + 
SelectOptions = 47 + SliceOptions = 48 + TransposeConvOptions = 49 + SparseToDenseOptions = 50 + TileOptions = 51 + ExpandDimsOptions = 52 + EqualOptions = 53 + NotEqualOptions = 54 + ShapeOptions = 55 + PowOptions = 56 + ArgMinOptions = 57 + FakeQuantOptions = 58 + PackOptions = 59 + LogicalOrOptions = 60 + OneHotOptions = 61 + LogicalAndOptions = 62 + LogicalNotOptions = 63 + UnpackOptions = 64 + FloorDivOptions = 65 + SquareOptions = 66 + ZerosLikeOptions = 67 + FillOptions = 68 + BidirectionalSequenceLSTMOptions = 69 + BidirectionalSequenceRNNOptions = 70 + UnidirectionalSequenceLSTMOptions = 71 + FloorModOptions = 72 + RangeOptions = 73 + ResizeNearestNeighborOptions = 74 + LeakyReluOptions = 75 + SquaredDifferenceOptions = 76 + MirrorPadOptions = 77 + AbsOptions = 78 + SplitVOptions = 79 + UniqueOptions = 80 + ReverseV2Options = 81 + AddNOptions = 82 + GatherNdOptions = 83 + CosOptions = 84 + WhereOptions = 85 + RankOptions = 86 + ReverseSequenceOptions = 87 + MatrixDiagOptions = 88 + QuantizeOptions = 89 + MatrixSetDiagOptions = 90 + HardSwishOptions = 91 + IfOptions = 92 + WhileOptions = 93 + DepthToSpaceOptions = 94 + NonMaxSuppressionV4Options = 95 + NonMaxSuppressionV5Options = 96 + ScatterNdOptions = 97 + SelectV2Options = 98 + DensifyOptions = 99 + SegmentSumOptions = 100 + BatchMatMulOptions = 101 + CumsumOptions = 102 + CallOnceOptions = 103 + BroadcastToOptions = 104 + Rfft2dOptions = 105 + Conv3DOptions = 106 + HashtableOptions = 107 + HashtableFindOptions = 108 + HashtableImportOptions = 109 + HashtableSizeOptions = 110 + VarHandleOptions = 111 + ReadVariableOptions = 112 + AssignVariableOptions = 113 + RandomOptions = 114 + BucketizeOptions = 115 + GeluOptions = 116 + DynamicUpdateSliceOptions = 117 + UnsortedSegmentProdOptions = 118 + UnsortedSegmentMaxOptions = 119 + UnsortedSegmentMinOptions = 120 + UnsortedSegmentSumOptions = 121 + ATan2Options = 122 + SignOptions = 123 + BitcastOptions = 124 + BitwiseXorOptions = 125 + RightShiftOptions = 
126 + +def BuiltinOptionsCreator(unionType, table): + from flatbuffers.table import Table + if not isinstance(table, Table): + return None + if unionType == BuiltinOptions().Conv2DOptions: + return Conv2DOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DepthwiseConv2DOptions: + return DepthwiseConv2DOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ConcatEmbeddingsOptions: + return ConcatEmbeddingsOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LSHProjectionOptions: + return LSHProjectionOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().Pool2DOptions: + return Pool2DOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SVDFOptions: + return SVDFOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().RNNOptions: + return RNNOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().FullyConnectedOptions: + return FullyConnectedOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SoftmaxOptions: + return SoftmaxOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ConcatenationOptions: + return ConcatenationOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().AddOptions: + return AddOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().L2NormOptions: + return L2NormOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LocalResponseNormalizationOptions: + return LocalResponseNormalizationOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LSTMOptions: + return LSTMOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ResizeBilinearOptions: + return ResizeBilinearOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().CallOptions: + return 
CallOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ReshapeOptions: + return ReshapeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SkipGramOptions: + return SkipGramOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SpaceToDepthOptions: + return SpaceToDepthOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().EmbeddingLookupSparseOptions: + return EmbeddingLookupSparseOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().MulOptions: + return MulOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().PadOptions: + return PadOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().GatherOptions: + return GatherOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BatchToSpaceNDOptions: + return BatchToSpaceNDOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SpaceToBatchNDOptions: + return SpaceToBatchNDOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().TransposeOptions: + return TransposeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ReducerOptions: + return ReducerOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SubOptions: + return SubOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DivOptions: + return DivOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SqueezeOptions: + return SqueezeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SequenceRNNOptions: + return SequenceRNNOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().StridedSliceOptions: + return StridedSliceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ExpOptions: + return ExpOptionsT.InitFromBuf(table.Bytes, 
table.Pos) + if unionType == BuiltinOptions().TopKV2Options: + return TopKV2OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SplitOptions: + return SplitOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LogSoftmaxOptions: + return LogSoftmaxOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().CastOptions: + return CastOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DequantizeOptions: + return DequantizeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().MaximumMinimumOptions: + return MaximumMinimumOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ArgMaxOptions: + return ArgMaxOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LessOptions: + return LessOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().NegOptions: + return NegOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().PadV2Options: + return PadV2OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().GreaterOptions: + return GreaterOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().GreaterEqualOptions: + return GreaterEqualOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LessEqualOptions: + return LessEqualOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SelectOptions: + return SelectOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SliceOptions: + return SliceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().TransposeConvOptions: + return TransposeConvOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SparseToDenseOptions: + return SparseToDenseOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().TileOptions: + return 
TileOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ExpandDimsOptions: + return ExpandDimsOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().EqualOptions: + return EqualOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().NotEqualOptions: + return NotEqualOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ShapeOptions: + return ShapeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().PowOptions: + return PowOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ArgMinOptions: + return ArgMinOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().FakeQuantOptions: + return FakeQuantOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().PackOptions: + return PackOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LogicalOrOptions: + return LogicalOrOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().OneHotOptions: + return OneHotOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LogicalAndOptions: + return LogicalAndOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LogicalNotOptions: + return LogicalNotOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UnpackOptions: + return UnpackOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().FloorDivOptions: + return FloorDivOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SquareOptions: + return SquareOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ZerosLikeOptions: + return ZerosLikeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().FillOptions: + return FillOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == 
BuiltinOptions().BidirectionalSequenceLSTMOptions: + return BidirectionalSequenceLSTMOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BidirectionalSequenceRNNOptions: + return BidirectionalSequenceRNNOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UnidirectionalSequenceLSTMOptions: + return UnidirectionalSequenceLSTMOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().FloorModOptions: + return FloorModOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().RangeOptions: + return RangeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ResizeNearestNeighborOptions: + return ResizeNearestNeighborOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().LeakyReluOptions: + return LeakyReluOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SquaredDifferenceOptions: + return SquaredDifferenceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().MirrorPadOptions: + return MirrorPadOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().AbsOptions: + return AbsOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SplitVOptions: + return SplitVOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UniqueOptions: + return UniqueOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ReverseV2Options: + return ReverseV2OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().AddNOptions: + return AddNOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().GatherNdOptions: + return GatherNdOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().CosOptions: + return CosOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().WhereOptions: + return 
WhereOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().RankOptions: + return RankOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ReverseSequenceOptions: + return ReverseSequenceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().MatrixDiagOptions: + return MatrixDiagOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().QuantizeOptions: + return QuantizeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().MatrixSetDiagOptions: + return MatrixSetDiagOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().HardSwishOptions: + return HardSwishOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().IfOptions: + return IfOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().WhileOptions: + return WhileOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DepthToSpaceOptions: + return DepthToSpaceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().NonMaxSuppressionV4Options: + return NonMaxSuppressionV4OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().NonMaxSuppressionV5Options: + return NonMaxSuppressionV5OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ScatterNdOptions: + return ScatterNdOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SelectV2Options: + return SelectV2OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DensifyOptions: + return DensifyOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SegmentSumOptions: + return SegmentSumOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BatchMatMulOptions: + return BatchMatMulOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().CumsumOptions: + return 
CumsumOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().CallOnceOptions: + return CallOnceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BroadcastToOptions: + return BroadcastToOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().Rfft2dOptions: + return Rfft2dOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().Conv3DOptions: + return Conv3DOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().HashtableOptions: + return HashtableOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().HashtableFindOptions: + return HashtableFindOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().HashtableImportOptions: + return HashtableImportOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().HashtableSizeOptions: + return HashtableSizeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().VarHandleOptions: + return VarHandleOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ReadVariableOptions: + return ReadVariableOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().AssignVariableOptions: + return AssignVariableOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().RandomOptions: + return RandomOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BucketizeOptions: + return BucketizeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().GeluOptions: + return GeluOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().DynamicUpdateSliceOptions: + return DynamicUpdateSliceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UnsortedSegmentProdOptions: + return UnsortedSegmentProdOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == 
BuiltinOptions().UnsortedSegmentMaxOptions: + return UnsortedSegmentMaxOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UnsortedSegmentMinOptions: + return UnsortedSegmentMinOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().UnsortedSegmentSumOptions: + return UnsortedSegmentSumOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().ATan2Options: + return ATan2OptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().SignOptions: + return SignOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BitcastOptions: + return BitcastOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().BitwiseXorOptions: + return BitwiseXorOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions().RightShiftOptions: + return RightShiftOptionsT.InitFromBuf(table.Bytes, table.Pos) + return None + + +class BuiltinOptions2(object): + NONE = 0 + StablehloConcatenateOptions = 1 + StablehloBroadcastInDimOptions = 2 + StablehloSliceOptions = 3 + StablehloConvolutionOptions = 4 + StablehloCustomCallOptions = 5 + StablehloReduceOptions = 6 + StablehloScatterOptions = 7 + StablehloCompareOptions = 8 + StablehloDynamicSliceOptions = 9 + StablehloPadOptions = 10 + StablehloIotaOptions = 11 + StablehloDotGeneralOptions = 12 + StablehloReduceWindowOptions = 13 + StablehloSortOptions = 14 + StablehloWhileOptions = 15 + StablehloGatherOptions = 16 + StablehloTransposeOptions = 17 + DilateOptions = 18 + StablehloRngBitGeneratorOptions = 19 + ReduceWindowOptions = 20 + StableHLOCompositeOptions = 21 + StablehloShiftLeftOptions = 22 + +def BuiltinOptions2Creator(unionType, table): + from flatbuffers.table import Table + if not isinstance(table, Table): + return None + if unionType == BuiltinOptions2().StablehloConcatenateOptions: + return StablehloConcatenateOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == 
BuiltinOptions2().StablehloBroadcastInDimOptions: + return StablehloBroadcastInDimOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloSliceOptions: + return StablehloSliceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloConvolutionOptions: + return StablehloConvolutionOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloCustomCallOptions: + return StablehloCustomCallOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloReduceOptions: + return StablehloReduceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloScatterOptions: + return StablehloScatterOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloCompareOptions: + return StablehloCompareOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloDynamicSliceOptions: + return StablehloDynamicSliceOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloPadOptions: + return StablehloPadOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloIotaOptions: + return StablehloIotaOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloDotGeneralOptions: + return StablehloDotGeneralOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloReduceWindowOptions: + return StablehloReduceWindowOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloSortOptions: + return StablehloSortOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloWhileOptions: + return StablehloWhileOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloGatherOptions: + return StablehloGatherOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == 
BuiltinOptions2().StablehloTransposeOptions: + return StablehloTransposeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().DilateOptions: + return DilateOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloRngBitGeneratorOptions: + return StablehloRngBitGeneratorOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().ReduceWindowOptions: + return ReduceWindowOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StableHLOCompositeOptions: + return StableHLOCompositeOptionsT.InitFromBuf(table.Bytes, table.Pos) + if unionType == BuiltinOptions2().StablehloShiftLeftOptions: + return StablehloShiftLeftOptionsT.InitFromBuf(table.Bytes, table.Pos) + return None + + +class StablehloPrecisionConfig(object): + DEFAULT = 0 + HIGH = 1 + HIGHEST = 2 + + +class StablehloComparisonDirection(object): + STABLEHLO_COMPARISON_DIRECTION_EQ = 0 + STABLEHLO_COMPARISON_DIRECTION_NE = 1 + STABLEHLO_COMPARISON_DIRECTION_GE = 2 + STABLEHLO_COMPARISON_DIRECTION_GT = 3 + STABLEHLO_COMPARISON_DIRECTION_LE = 4 + STABLEHLO_COMPARISON_DIRECTION_LT = 5 + + +class StablehloComparisonType(object): + STABLEHLO_COMPARISON_TYPE_NOTYPE = 0 + STABLEHLO_COMPARISON_TYPE_FLOAT = 1 + STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER = 2 + STABLEHLO_COMPARISON_TYPE_SIGNED = 3 + STABLEHLO_COMPARISON_TYPE_UNSIGNED = 4 + + +class RngAlgorithm(object): + DEFAULT = 0 + PHILOX = 1 + THREEFRY = 2 + + +class Padding(object): + SAME = 0 + VALID = 1 + + +class ActivationFunctionType(object): + NONE = 0 + RELU = 1 + RELU_N1_TO_1 = 2 + RELU6 = 3 + TANH = 4 + SIGN_BIT = 5 + + +class LSHProjectionType(object): + UNKNOWN = 0 + SPARSE = 1 + DENSE = 2 + + +class FullyConnectedOptionsWeightsFormat(object): + DEFAULT = 0 + SHUFFLED4x16INT8 = 1 + + +class LSTMKernelType(object): + FULL = 0 + BASIC = 1 + + +class CombinerType(object): + SUM = 0 + MEAN = 1 + SQRTN = 2 + + +class MirrorPadMode(object): + REFLECT = 0 + 
class ReduceWindowFunction(object):
    UNSUPPORTED = 0
    ADD = 1
    MUL = 2
    MINIMUM = 3
    MAXIMUM = 4
    ALL = 5
    ANY = 6


class CustomOptionsFormat(object):
    FLEXBUFFERS = 0


class CustomQuantization(object):
    """Flatbuffer table wrapper: opaque custom quantization payload (ubyte vector)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = CustomQuantization()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsCustomQuantization(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # CustomQuantization
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # CustomQuantization
    def Custom(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # CustomQuantization
    def CustomAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    # CustomQuantization
    def CustomLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # CustomQuantization
    def CustomIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

def CustomQuantizationStart(builder):
    builder.StartObject(1)

def CustomQuantizationAddCustom(builder, custom):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(custom), 0)
def CustomQuantizationStartCustomVector(builder, numElems):
    return builder.StartVector(1, numElems, 1)

def CustomQuantizationEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class CustomQuantizationT(object):
    """Object-API (mutable) mirror of the CustomQuantization table."""

    # CustomQuantizationT
    def __init__(self):
        self.custom = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        customQuantization = CustomQuantization()
        customQuantization.Init(buf, pos)
        return cls.InitFromObj(customQuantization)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, customQuantization):
        x = CustomQuantizationT()
        x._UnPack(customQuantization)
        return x

    # CustomQuantizationT
    def _UnPack(self, customQuantization):
        if customQuantization is None:
            return
        if not customQuantization.CustomIsNone():
            if np is None:
                self.custom = []
                for i in range(customQuantization.CustomLength()):
                    self.custom.append(customQuantization.Custom(i))
            else:
                self.custom = customQuantization.CustomAsNumpy()

    # CustomQuantizationT
    def Pack(self, builder):
        if self.custom is not None:
            if np is not None and type(self.custom) is np.ndarray:
                custom = builder.CreateNumpyVector(self.custom)
            else:
                CustomQuantizationStartCustomVector(builder, len(self.custom))
                for i in reversed(range(len(self.custom))):
                    builder.PrependUint8(self.custom[i])
                custom = builder.EndVector()
        CustomQuantizationStart(builder)
        if self.custom is not None:
            CustomQuantizationAddCustom(builder, custom)
        customQuantization = CustomQuantizationEnd(builder)
        return customQuantization


class QuantizationParameters(object):
    """Flatbuffer table wrapper: per-tensor / per-axis quantization parameters."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = QuantizationParameters()
        x.Init(buf, n + offset)
        return x
+ + @classmethod + def GetRootAsQuantizationParameters(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # QuantizationParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # QuantizationParameters + def Min(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def MinAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MinLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MinIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # QuantizationParameters + def Max(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def MaxAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MaxLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 
0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MaxIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # QuantizationParameters + def Scale(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def ScaleAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def ScaleLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ScaleIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # QuantizationParameters + def ZeroPoint(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # QuantizationParameters + def ZeroPointAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # QuantizationParameters + def ZeroPointLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ZeroPointIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # QuantizationParameters + def DetailsType(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # QuantizationParameters + def Details(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # QuantizationParameters + def QuantizedDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def QuantizationParametersStart(builder): + builder.StartObject(7) + +def QuantizationParametersAddMin(builder, min): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0) + +def QuantizationParametersStartMinVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def QuantizationParametersAddMax(builder, max): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 0) + +def QuantizationParametersStartMaxVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def QuantizationParametersAddScale(builder, scale): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0) + +def QuantizationParametersStartScaleVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def QuantizationParametersAddZeroPoint(builder, zeroPoint): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0) + +def QuantizationParametersStartZeroPointVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def QuantizationParametersAddDetailsType(builder, detailsType): + builder.PrependUint8Slot(4, detailsType, 0) + +def QuantizationParametersAddDetails(builder, details): + 
def QuantizationParametersAddDetails(builder, details):
    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0)

def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension):
    builder.PrependInt32Slot(6, quantizedDimension, 0)

def QuantizationParametersEnd(builder):
    return builder.EndObject()


try:
    from typing import List, Union
except:
    pass

class QuantizationParametersT(object):
    """Object-API (mutable) mirror of the QuantizationParameters table."""

    # QuantizationParametersT
    def __init__(self):
        self.min = None  # type: List[float]
        self.max = None  # type: List[float]
        self.scale = None  # type: List[float]
        self.zeroPoint = None  # type: List[int]
        self.detailsType = 0  # type: int
        self.details = None  # type: Union[None, CustomQuantizationT]
        self.quantizedDimension = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        quantizationParameters = QuantizationParameters()
        quantizationParameters.Init(buf, pos)
        return cls.InitFromObj(quantizationParameters)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, quantizationParameters):
        x = QuantizationParametersT()
        x._UnPack(quantizationParameters)
        return x

    # QuantizationParametersT
    def _UnPack(self, quantizationParameters):
        if quantizationParameters is None:
            return
        if not quantizationParameters.MinIsNone():
            if np is None:
                self.min = []
                for i in range(quantizationParameters.MinLength()):
                    self.min.append(quantizationParameters.Min(i))
            else:
                self.min = quantizationParameters.MinAsNumpy()
        if not quantizationParameters.MaxIsNone():
            if np is None:
                self.max = []
                for i in range(quantizationParameters.MaxLength()):
                    self.max.append(quantizationParameters.Max(i))
            else:
                self.max = quantizationParameters.MaxAsNumpy()
        if not quantizationParameters.ScaleIsNone():
            if np is None:
                self.scale = []
                for i in range(quantizationParameters.ScaleLength()):
                    self.scale.append(quantizationParameters.Scale(i))
            else:
                self.scale = quantizationParameters.ScaleAsNumpy()
        if not quantizationParameters.ZeroPointIsNone():
            if np is None:
                self.zeroPoint = []
                for i in range(quantizationParameters.ZeroPointLength()):
                    self.zeroPoint.append(quantizationParameters.ZeroPoint(i))
            else:
                self.zeroPoint = quantizationParameters.ZeroPointAsNumpy()
        self.detailsType = quantizationParameters.DetailsType()
        self.details = QuantizationDetailsCreator(self.detailsType, quantizationParameters.Details())
        self.quantizedDimension = quantizationParameters.QuantizedDimension()

    # QuantizationParametersT
    def Pack(self, builder):
        if self.min is not None:
            if np is not None and type(self.min) is np.ndarray:
                min = builder.CreateNumpyVector(self.min)
            else:
                QuantizationParametersStartMinVector(builder, len(self.min))
                for i in reversed(range(len(self.min))):
                    builder.PrependFloat32(self.min[i])
                min = builder.EndVector()
        if self.max is not None:
            if np is not None and type(self.max) is np.ndarray:
                max = builder.CreateNumpyVector(self.max)
            else:
                QuantizationParametersStartMaxVector(builder, len(self.max))
                for i in reversed(range(len(self.max))):
                    builder.PrependFloat32(self.max[i])
                max = builder.EndVector()
        if self.scale is not None:
            if np is not None and type(self.scale) is np.ndarray:
                scale = builder.CreateNumpyVector(self.scale)
            else:
                QuantizationParametersStartScaleVector(builder, len(self.scale))
                for i in reversed(range(len(self.scale))):
                    builder.PrependFloat32(self.scale[i])
                scale = builder.EndVector()
        if self.zeroPoint is not None:
            if np is not None and type(self.zeroPoint) is np.ndarray:
                zeroPoint = builder.CreateNumpyVector(self.zeroPoint)
            else:
                QuantizationParametersStartZeroPointVector(builder, len(self.zeroPoint))
                for i in reversed(range(len(self.zeroPoint))):
                    builder.PrependInt64(self.zeroPoint[i])
                zeroPoint = builder.EndVector()
        if self.details is not None:
            details = self.details.Pack(builder)
        QuantizationParametersStart(builder)
        if self.min is not None:
            QuantizationParametersAddMin(builder, min)
        if self.max is not None:
            QuantizationParametersAddMax(builder, max)
        if self.scale is not None:
            QuantizationParametersAddScale(builder, scale)
        if self.zeroPoint is not None:
            QuantizationParametersAddZeroPoint(builder, zeroPoint)
        QuantizationParametersAddDetailsType(builder, self.detailsType)
        if self.details is not None:
            QuantizationParametersAddDetails(builder, details)
        QuantizationParametersAddQuantizedDimension(builder, self.quantizedDimension)
        quantizationParameters = QuantizationParametersEnd(builder)
        return quantizationParameters


class Int32Vector(object):
    """Flatbuffer table wrapper: int32 vector used by sparse index metadata."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Int32Vector()
        x.Init(buf, n + offset)
        return x
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Int32Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Int32Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Int32Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Int32Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Int32Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def Int32VectorStart(builder): + builder.StartObject(1) + +def Int32VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) + +def Int32VectorStartValuesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def Int32VectorEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class Int32VectorT(object): + + # Int32VectorT + def __init__(self): + self.values = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + int32Vector = Int32Vector() + int32Vector.Init(buf, pos) + return cls.InitFromObj(int32Vector) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return 
cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, int32Vector): + x = Int32VectorT() + x._UnPack(int32Vector) + return x + + # Int32VectorT + def _UnPack(self, int32Vector): + if int32Vector is None: + return + if not int32Vector.ValuesIsNone(): + if np is None: + self.values = [] + for i in range(int32Vector.ValuesLength()): + self.values.append(int32Vector.Values(i)) + else: + self.values = int32Vector.ValuesAsNumpy() + + # Int32VectorT + def Pack(self, builder): + if self.values is not None: + if np is not None and type(self.values) is np.ndarray: + values = builder.CreateNumpyVector(self.values) + else: + Int32VectorStartValuesVector(builder, len(self.values)) + for i in reversed(range(len(self.values))): + builder.PrependInt32(self.values[i]) + values = builder.EndVector() + Int32VectorStart(builder) + if self.values is not None: + Int32VectorAddValues(builder, values) + int32Vector = Int32VectorEnd(builder) + return int32Vector + + +class Uint16Vector(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Uint16Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUint16Vector(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Uint16Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Uint16Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2)) + return 0 + + # Uint16Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o) + return 0 + + # Uint16Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Uint16Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def Uint16VectorStart(builder): + builder.StartObject(1) + +def Uint16VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) + +def Uint16VectorStartValuesVector(builder, numElems): + return builder.StartVector(2, numElems, 2) + +def Uint16VectorEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class Uint16VectorT(object): + + # Uint16VectorT + def __init__(self): + self.values = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + uint16Vector = Uint16Vector() + uint16Vector.Init(buf, pos) + return cls.InitFromObj(uint16Vector) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + 
return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, uint16Vector): + x = Uint16VectorT() + x._UnPack(uint16Vector) + return x + + # Uint16VectorT + def _UnPack(self, uint16Vector): + if uint16Vector is None: + return + if not uint16Vector.ValuesIsNone(): + if np is None: + self.values = [] + for i in range(uint16Vector.ValuesLength()): + self.values.append(uint16Vector.Values(i)) + else: + self.values = uint16Vector.ValuesAsNumpy() + + # Uint16VectorT + def Pack(self, builder): + if self.values is not None: + if np is not None and type(self.values) is np.ndarray: + values = builder.CreateNumpyVector(self.values) + else: + Uint16VectorStartValuesVector(builder, len(self.values)) + for i in reversed(range(len(self.values))): + builder.PrependUint16(self.values[i]) + values = builder.EndVector() + Uint16VectorStart(builder) + if self.values is not None: + Uint16VectorAddValues(builder, values) + uint16Vector = Uint16VectorEnd(builder) + return uint16Vector + + +class Uint8Vector(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Uint8Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUint8Vector(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Uint8Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Uint8Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) + return 0 + + # Uint8Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # Uint8Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Uint8Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def Uint8VectorStart(builder): + builder.StartObject(1) + +def Uint8VectorAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) + +def Uint8VectorStartValuesVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + +def Uint8VectorEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class Uint8VectorT(object): + + # Uint8VectorT + def __init__(self): + self.values = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + uint8Vector = Uint8Vector() + uint8Vector.Init(buf, pos) + return cls.InitFromObj(uint8Vector) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return 
cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, uint8Vector): + x = Uint8VectorT() + x._UnPack(uint8Vector) + return x + + # Uint8VectorT + def _UnPack(self, uint8Vector): + if uint8Vector is None: + return + if not uint8Vector.ValuesIsNone(): + if np is None: + self.values = [] + for i in range(uint8Vector.ValuesLength()): + self.values.append(uint8Vector.Values(i)) + else: + self.values = uint8Vector.ValuesAsNumpy() + + # Uint8VectorT + def Pack(self, builder): + if self.values is not None: + if np is not None and type(self.values) is np.ndarray: + values = builder.CreateNumpyVector(self.values) + else: + Uint8VectorStartValuesVector(builder, len(self.values)) + for i in reversed(range(len(self.values))): + builder.PrependUint8(self.values[i]) + values = builder.EndVector() + Uint8VectorStart(builder) + if self.values is not None: + Uint8VectorAddValues(builder, values) + uint8Vector = Uint8VectorEnd(builder) + return uint8Vector + + +class DimensionMetadata(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DimensionMetadata() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDimensionMetadata(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DimensionMetadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DimensionMetadata + def Format(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def DenseSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegmentsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegments(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # DimensionMetadata + def ArrayIndicesType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArrayIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + +def DimensionMetadataStart(builder): + builder.StartObject(6) + +def DimensionMetadataAddFormat(builder, format): + builder.PrependInt8Slot(0, format, 0) + +def 
DimensionMetadataAddDenseSize(builder, denseSize): + builder.PrependInt32Slot(1, denseSize, 0) + +def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): + builder.PrependUint8Slot(2, arraySegmentsType, 0) + +def DimensionMetadataAddArraySegments(builder, arraySegments): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0) + +def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): + builder.PrependUint8Slot(4, arrayIndicesType, 0) + +def DimensionMetadataAddArrayIndices(builder, arrayIndices): + builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0) + +def DimensionMetadataEnd(builder): + return builder.EndObject() + + +try: + from typing import Union +except: + pass + +class DimensionMetadataT(object): + + # DimensionMetadataT + def __init__(self): + self.format = 0 # type: int + self.denseSize = 0 # type: int + self.arraySegmentsType = 0 # type: int + self.arraySegments = None # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT] + self.arrayIndicesType = 0 # type: int + self.arrayIndices = None # type: Union[None, Int32VectorT, Uint16VectorT, Uint8VectorT] + + @classmethod + def InitFromBuf(cls, buf, pos): + dimensionMetadata = DimensionMetadata() + dimensionMetadata.Init(buf, pos) + return cls.InitFromObj(dimensionMetadata) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, dimensionMetadata): + x = DimensionMetadataT() + x._UnPack(dimensionMetadata) + return x + + # DimensionMetadataT + def _UnPack(self, dimensionMetadata): + if dimensionMetadata is None: + return + self.format = dimensionMetadata.Format() + self.denseSize = dimensionMetadata.DenseSize() + self.arraySegmentsType = dimensionMetadata.ArraySegmentsType() + self.arraySegments = 
SparseIndexVectorCreator(self.arraySegmentsType, dimensionMetadata.ArraySegments()) + self.arrayIndicesType = dimensionMetadata.ArrayIndicesType() + self.arrayIndices = SparseIndexVectorCreator(self.arrayIndicesType, dimensionMetadata.ArrayIndices()) + + # DimensionMetadataT + def Pack(self, builder): + if self.arraySegments is not None: + arraySegments = self.arraySegments.Pack(builder) + if self.arrayIndices is not None: + arrayIndices = self.arrayIndices.Pack(builder) + DimensionMetadataStart(builder) + DimensionMetadataAddFormat(builder, self.format) + DimensionMetadataAddDenseSize(builder, self.denseSize) + DimensionMetadataAddArraySegmentsType(builder, self.arraySegmentsType) + if self.arraySegments is not None: + DimensionMetadataAddArraySegments(builder, arraySegments) + DimensionMetadataAddArrayIndicesType(builder, self.arrayIndicesType) + if self.arrayIndices is not None: + DimensionMetadataAddArrayIndices(builder, arrayIndices) + dimensionMetadata = DimensionMetadataEnd(builder) + return dimensionMetadata + + +class SparsityParameters(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparsityParameters() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSparsityParameters(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SparsityParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparsityParameters + def TraversalOrder(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SparsityParameters + def TraversalOrderAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def TraversalOrderLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def TraversalOrderIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SparsityParameters + def BlockMap(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SparsityParameters + def BlockMapAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def BlockMapLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def 
BlockMapIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SparsityParameters + def DimMetadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = DimensionMetadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SparsityParameters + def DimMetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def DimMetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + +def SparsityParametersStart(builder): + builder.StartObject(3) + +def SparsityParametersAddTraversalOrder(builder, traversalOrder): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0) + +def SparsityParametersStartTraversalOrderVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SparsityParametersAddBlockMap(builder, blockMap): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0) + +def SparsityParametersStartBlockMapVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SparsityParametersAddDimMetadata(builder, dimMetadata): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0) + +def SparsityParametersStartDimMetadataVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SparsityParametersEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class SparsityParametersT(object): + + # SparsityParametersT + def __init__(self): + self.traversalOrder = None # type: List[int] + self.blockMap = None # type: List[int] + 
self.dimMetadata = None # type: List[DimensionMetadataT] + + @classmethod + def InitFromBuf(cls, buf, pos): + sparsityParameters = SparsityParameters() + sparsityParameters.Init(buf, pos) + return cls.InitFromObj(sparsityParameters) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, sparsityParameters): + x = SparsityParametersT() + x._UnPack(sparsityParameters) + return x + + # SparsityParametersT + def _UnPack(self, sparsityParameters): + if sparsityParameters is None: + return + if not sparsityParameters.TraversalOrderIsNone(): + if np is None: + self.traversalOrder = [] + for i in range(sparsityParameters.TraversalOrderLength()): + self.traversalOrder.append(sparsityParameters.TraversalOrder(i)) + else: + self.traversalOrder = sparsityParameters.TraversalOrderAsNumpy() + if not sparsityParameters.BlockMapIsNone(): + if np is None: + self.blockMap = [] + for i in range(sparsityParameters.BlockMapLength()): + self.blockMap.append(sparsityParameters.BlockMap(i)) + else: + self.blockMap = sparsityParameters.BlockMapAsNumpy() + if not sparsityParameters.DimMetadataIsNone(): + self.dimMetadata = [] + for i in range(sparsityParameters.DimMetadataLength()): + if sparsityParameters.DimMetadata(i) is None: + self.dimMetadata.append(None) + else: + dimensionMetadata_ = DimensionMetadataT.InitFromObj(sparsityParameters.DimMetadata(i)) + self.dimMetadata.append(dimensionMetadata_) + + # SparsityParametersT + def Pack(self, builder): + if self.traversalOrder is not None: + if np is not None and type(self.traversalOrder) is np.ndarray: + traversalOrder = builder.CreateNumpyVector(self.traversalOrder) + else: + SparsityParametersStartTraversalOrderVector(builder, len(self.traversalOrder)) + for i in reversed(range(len(self.traversalOrder))): + builder.PrependInt32(self.traversalOrder[i]) + traversalOrder = builder.EndVector() + 
if self.blockMap is not None: + if np is not None and type(self.blockMap) is np.ndarray: + blockMap = builder.CreateNumpyVector(self.blockMap) + else: + SparsityParametersStartBlockMapVector(builder, len(self.blockMap)) + for i in reversed(range(len(self.blockMap))): + builder.PrependInt32(self.blockMap[i]) + blockMap = builder.EndVector() + if self.dimMetadata is not None: + dimMetadatalist = [] + for i in range(len(self.dimMetadata)): + dimMetadatalist.append(self.dimMetadata[i].Pack(builder)) + SparsityParametersStartDimMetadataVector(builder, len(self.dimMetadata)) + for i in reversed(range(len(self.dimMetadata))): + builder.PrependUOffsetTRelative(dimMetadatalist[i]) + dimMetadata = builder.EndVector() + SparsityParametersStart(builder) + if self.traversalOrder is not None: + SparsityParametersAddTraversalOrder(builder, traversalOrder) + if self.blockMap is not None: + SparsityParametersAddBlockMap(builder, blockMap) + if self.dimMetadata is not None: + SparsityParametersAddDimMetadata(builder, dimMetadata) + sparsityParameters = SparsityParametersEnd(builder) + return sparsityParameters + + +class VariantSubType(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = VariantSubType() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsVariantSubType(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def VariantSubTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # VariantSubType + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # VariantSubType + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # VariantSubType + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # VariantSubType + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # VariantSubType + def ShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # VariantSubType + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # VariantSubType + def HasRank(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def VariantSubTypeStart(builder): + builder.StartObject(3) + +def VariantSubTypeAddShape(builder, shape): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) + +def VariantSubTypeStartShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def VariantSubTypeAddType(builder, type): + 
builder.PrependInt8Slot(1, type, 0) + +def VariantSubTypeAddHasRank(builder, hasRank): + builder.PrependBoolSlot(2, hasRank, 0) + +def VariantSubTypeEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class VariantSubTypeT(object): + + # VariantSubTypeT + def __init__(self): + self.shape = None # type: List[int] + self.type = 0 # type: int + self.hasRank = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + variantSubType = VariantSubType() + variantSubType.Init(buf, pos) + return cls.InitFromObj(variantSubType) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, variantSubType): + x = VariantSubTypeT() + x._UnPack(variantSubType) + return x + + # VariantSubTypeT + def _UnPack(self, variantSubType): + if variantSubType is None: + return + if not variantSubType.ShapeIsNone(): + if np is None: + self.shape = [] + for i in range(variantSubType.ShapeLength()): + self.shape.append(variantSubType.Shape(i)) + else: + self.shape = variantSubType.ShapeAsNumpy() + self.type = variantSubType.Type() + self.hasRank = variantSubType.HasRank() + + # VariantSubTypeT + def Pack(self, builder): + if self.shape is not None: + if np is not None and type(self.shape) is np.ndarray: + shape = builder.CreateNumpyVector(self.shape) + else: + VariantSubTypeStartShapeVector(builder, len(self.shape)) + for i in reversed(range(len(self.shape))): + builder.PrependInt32(self.shape[i]) + shape = builder.EndVector() + VariantSubTypeStart(builder) + if self.shape is not None: + VariantSubTypeAddShape(builder, shape) + VariantSubTypeAddType(builder, self.type) + VariantSubTypeAddHasRank(builder, self.hasRank) + variantSubType = VariantSubTypeEnd(builder) + return variantSubType + + +class Tensor(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Tensor() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTensor(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Tensor + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Tensor + def Shape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Tensor + def ShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Tensor + def ShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def ShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # Tensor + def Type(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Tensor + def Buffer(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # Tensor + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Tensor + def Quantization(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + obj = QuantizationParameters() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def IsVariable(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # Tensor + def Sparsity(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + obj = SparsityParameters() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def ShapeSignature(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Tensor + def ShapeSignatureAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Tensor + def ShapeSignatureLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def ShapeSignatureIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + + # Tensor + def HasRank(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # Tensor + def VariantTensors(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = 
VariantSubType() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Tensor + def VariantTensorsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Tensor + def VariantTensorsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + return o == 0 + +def TensorStart(builder): + builder.StartObject(10) + +def TensorAddShape(builder, shape): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) + +def TensorStartShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def TensorAddType(builder, type): + builder.PrependInt8Slot(1, type, 0) + +def TensorAddBuffer(builder, buffer): + builder.PrependUint32Slot(2, buffer, 0) + +def TensorAddName(builder, name): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) + +def TensorAddQuantization(builder, quantization): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0) + +def TensorAddIsVariable(builder, isVariable): + builder.PrependBoolSlot(5, isVariable, 0) + +def TensorAddSparsity(builder, sparsity): + builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(sparsity), 0) + +def TensorAddShapeSignature(builder, shapeSignature): + builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shapeSignature), 0) + +def TensorStartShapeSignatureVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def TensorAddHasRank(builder, hasRank): + builder.PrependBoolSlot(8, hasRank, 0) + +def TensorAddVariantTensors(builder, variantTensors): + builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(variantTensors), 0) + +def TensorStartVariantTensorsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def 
TensorEnd(builder): + return builder.EndObject() + + +try: + from typing import List, Optional +except: + pass + +class TensorT(object): + + # TensorT + def __init__(self): + self.shape = None # type: List[int] + self.type = 0 # type: int + self.buffer = 0 # type: int + self.name = None # type: str + self.quantization = None # type: Optional[QuantizationParametersT] + self.isVariable = False # type: bool + self.sparsity = None # type: Optional[SparsityParametersT] + self.shapeSignature = None # type: List[int] + self.hasRank = False # type: bool + self.variantTensors = None # type: List[VariantSubTypeT] + + @classmethod + def InitFromBuf(cls, buf, pos): + tensor = Tensor() + tensor.Init(buf, pos) + return cls.InitFromObj(tensor) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, tensor): + x = TensorT() + x._UnPack(tensor) + return x + + # TensorT + def _UnPack(self, tensor): + if tensor is None: + return + if not tensor.ShapeIsNone(): + if np is None: + self.shape = [] + for i in range(tensor.ShapeLength()): + self.shape.append(tensor.Shape(i)) + else: + self.shape = tensor.ShapeAsNumpy() + self.type = tensor.Type() + self.buffer = tensor.Buffer() + self.name = tensor.Name() + if tensor.Quantization() is not None: + self.quantization = QuantizationParametersT.InitFromObj(tensor.Quantization()) + self.isVariable = tensor.IsVariable() + if tensor.Sparsity() is not None: + self.sparsity = SparsityParametersT.InitFromObj(tensor.Sparsity()) + if not tensor.ShapeSignatureIsNone(): + if np is None: + self.shapeSignature = [] + for i in range(tensor.ShapeSignatureLength()): + self.shapeSignature.append(tensor.ShapeSignature(i)) + else: + self.shapeSignature = tensor.ShapeSignatureAsNumpy() + self.hasRank = tensor.HasRank() + if not tensor.VariantTensorsIsNone(): + self.variantTensors = [] + for i in 
range(tensor.VariantTensorsLength()): + if tensor.VariantTensors(i) is None: + self.variantTensors.append(None) + else: + variantSubType_ = VariantSubTypeT.InitFromObj(tensor.VariantTensors(i)) + self.variantTensors.append(variantSubType_) + + # TensorT + def Pack(self, builder): + if self.shape is not None: + if np is not None and type(self.shape) is np.ndarray: + shape = builder.CreateNumpyVector(self.shape) + else: + TensorStartShapeVector(builder, len(self.shape)) + for i in reversed(range(len(self.shape))): + builder.PrependInt32(self.shape[i]) + shape = builder.EndVector() + if self.name is not None: + name = builder.CreateString(self.name) + if self.quantization is not None: + quantization = self.quantization.Pack(builder) + if self.sparsity is not None: + sparsity = self.sparsity.Pack(builder) + if self.shapeSignature is not None: + if np is not None and type(self.shapeSignature) is np.ndarray: + shapeSignature = builder.CreateNumpyVector(self.shapeSignature) + else: + TensorStartShapeSignatureVector(builder, len(self.shapeSignature)) + for i in reversed(range(len(self.shapeSignature))): + builder.PrependInt32(self.shapeSignature[i]) + shapeSignature = builder.EndVector() + if self.variantTensors is not None: + variantTensorslist = [] + for i in range(len(self.variantTensors)): + variantTensorslist.append(self.variantTensors[i].Pack(builder)) + TensorStartVariantTensorsVector(builder, len(self.variantTensors)) + for i in reversed(range(len(self.variantTensors))): + builder.PrependUOffsetTRelative(variantTensorslist[i]) + variantTensors = builder.EndVector() + TensorStart(builder) + if self.shape is not None: + TensorAddShape(builder, shape) + TensorAddType(builder, self.type) + TensorAddBuffer(builder, self.buffer) + if self.name is not None: + TensorAddName(builder, name) + if self.quantization is not None: + TensorAddQuantization(builder, quantization) + TensorAddIsVariable(builder, self.isVariable) + if self.sparsity is not None: + 
TensorAddSparsity(builder, sparsity) + if self.shapeSignature is not None: + TensorAddShapeSignature(builder, shapeSignature) + TensorAddHasRank(builder, self.hasRank) + if self.variantTensors is not None: + TensorAddVariantTensors(builder, variantTensors) + tensor = TensorEnd(builder) + return tensor + + +class StablehloGatherOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloGatherOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloGatherOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloGatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloGatherOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloGatherOptions + def OffsetDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloGatherOptions + def OffsetDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def OffsetDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def OffsetDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloGatherOptions + def CollapsedSliceDims(self, j): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def CollapsedSliceDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloGatherOptions + def StartIndexMap(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloGatherOptions + def StartIndexMapAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def StartIndexMapLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def StartIndexMapIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloGatherOptions + def IndexVectorDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloGatherOptions + def 
SliceSizes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloGatherOptions + def SliceSizesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloGatherOptions + def SliceSizesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloGatherOptions + def SliceSizesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloGatherOptions + def IndicesAreSorted(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def StablehloGatherOptionsStart(builder): + builder.StartObject(6) + +def StablehloGatherOptionsAddOffsetDims(builder, offsetDims): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(offsetDims), 0) + +def StablehloGatherOptionsStartOffsetDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(collapsedSliceDims), 0) + +def StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(startIndexMap), 0) + +def StablehloGatherOptionsStartStartIndexMapVector(builder, 
numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloGatherOptionsAddIndexVectorDim(builder, indexVectorDim): + builder.PrependInt64Slot(3, indexVectorDim, 0) + +def StablehloGatherOptionsAddSliceSizes(builder, sliceSizes): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0) + +def StablehloGatherOptionsStartSliceSizesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloGatherOptionsAddIndicesAreSorted(builder, indicesAreSorted): + builder.PrependBoolSlot(5, indicesAreSorted, 0) + +def StablehloGatherOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloGatherOptionsT(object): + + # StablehloGatherOptionsT + def __init__(self): + self.offsetDims = None # type: List[int] + self.collapsedSliceDims = None # type: List[int] + self.startIndexMap = None # type: List[int] + self.indexVectorDim = 0 # type: int + self.sliceSizes = None # type: List[int] + self.indicesAreSorted = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloGatherOptions = StablehloGatherOptions() + stablehloGatherOptions.Init(buf, pos) + return cls.InitFromObj(stablehloGatherOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloGatherOptions): + x = StablehloGatherOptionsT() + x._UnPack(stablehloGatherOptions) + return x + + # StablehloGatherOptionsT + def _UnPack(self, stablehloGatherOptions): + if stablehloGatherOptions is None: + return + if not stablehloGatherOptions.OffsetDimsIsNone(): + if np is None: + self.offsetDims = [] + for i in range(stablehloGatherOptions.OffsetDimsLength()): + self.offsetDims.append(stablehloGatherOptions.OffsetDims(i)) + else: + self.offsetDims = stablehloGatherOptions.OffsetDimsAsNumpy() + if not 
stablehloGatherOptions.CollapsedSliceDimsIsNone(): + if np is None: + self.collapsedSliceDims = [] + for i in range(stablehloGatherOptions.CollapsedSliceDimsLength()): + self.collapsedSliceDims.append(stablehloGatherOptions.CollapsedSliceDims(i)) + else: + self.collapsedSliceDims = stablehloGatherOptions.CollapsedSliceDimsAsNumpy() + if not stablehloGatherOptions.StartIndexMapIsNone(): + if np is None: + self.startIndexMap = [] + for i in range(stablehloGatherOptions.StartIndexMapLength()): + self.startIndexMap.append(stablehloGatherOptions.StartIndexMap(i)) + else: + self.startIndexMap = stablehloGatherOptions.StartIndexMapAsNumpy() + self.indexVectorDim = stablehloGatherOptions.IndexVectorDim() + if not stablehloGatherOptions.SliceSizesIsNone(): + if np is None: + self.sliceSizes = [] + for i in range(stablehloGatherOptions.SliceSizesLength()): + self.sliceSizes.append(stablehloGatherOptions.SliceSizes(i)) + else: + self.sliceSizes = stablehloGatherOptions.SliceSizesAsNumpy() + self.indicesAreSorted = stablehloGatherOptions.IndicesAreSorted() + + # StablehloGatherOptionsT + def Pack(self, builder): + if self.offsetDims is not None: + if np is not None and type(self.offsetDims) is np.ndarray: + offsetDims = builder.CreateNumpyVector(self.offsetDims) + else: + StablehloGatherOptionsStartOffsetDimsVector(builder, len(self.offsetDims)) + for i in reversed(range(len(self.offsetDims))): + builder.PrependInt64(self.offsetDims[i]) + offsetDims = builder.EndVector() + if self.collapsedSliceDims is not None: + if np is not None and type(self.collapsedSliceDims) is np.ndarray: + collapsedSliceDims = builder.CreateNumpyVector(self.collapsedSliceDims) + else: + StablehloGatherOptionsStartCollapsedSliceDimsVector(builder, len(self.collapsedSliceDims)) + for i in reversed(range(len(self.collapsedSliceDims))): + builder.PrependInt64(self.collapsedSliceDims[i]) + collapsedSliceDims = builder.EndVector() + if self.startIndexMap is not None: + if np is not None and 
type(self.startIndexMap) is np.ndarray: + startIndexMap = builder.CreateNumpyVector(self.startIndexMap) + else: + StablehloGatherOptionsStartStartIndexMapVector(builder, len(self.startIndexMap)) + for i in reversed(range(len(self.startIndexMap))): + builder.PrependInt64(self.startIndexMap[i]) + startIndexMap = builder.EndVector() + if self.sliceSizes is not None: + if np is not None and type(self.sliceSizes) is np.ndarray: + sliceSizes = builder.CreateNumpyVector(self.sliceSizes) + else: + StablehloGatherOptionsStartSliceSizesVector(builder, len(self.sliceSizes)) + for i in reversed(range(len(self.sliceSizes))): + builder.PrependInt64(self.sliceSizes[i]) + sliceSizes = builder.EndVector() + StablehloGatherOptionsStart(builder) + if self.offsetDims is not None: + StablehloGatherOptionsAddOffsetDims(builder, offsetDims) + if self.collapsedSliceDims is not None: + StablehloGatherOptionsAddCollapsedSliceDims(builder, collapsedSliceDims) + if self.startIndexMap is not None: + StablehloGatherOptionsAddStartIndexMap(builder, startIndexMap) + StablehloGatherOptionsAddIndexVectorDim(builder, self.indexVectorDim) + if self.sliceSizes is not None: + StablehloGatherOptionsAddSliceSizes(builder, sliceSizes) + StablehloGatherOptionsAddIndicesAreSorted(builder, self.indicesAreSorted) + stablehloGatherOptions = StablehloGatherOptionsEnd(builder) + return stablehloGatherOptions + + +class StablehloTransposeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloTransposeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloTransposeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloTransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloTransposeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloTransposeOptions + def Permutation(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloTransposeOptions + def PermutationAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloTransposeOptions + def PermutationLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloTransposeOptions + def PermutationIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def StablehloTransposeOptionsStart(builder): + builder.StartObject(1) + +def StablehloTransposeOptionsAddPermutation(builder, permutation): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(permutation), 0) + +def StablehloTransposeOptionsStartPermutationVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloTransposeOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloTransposeOptionsT(object): + + # StablehloTransposeOptionsT + def __init__(self): + self.permutation = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloTransposeOptions = 
StablehloTransposeOptions() + stablehloTransposeOptions.Init(buf, pos) + return cls.InitFromObj(stablehloTransposeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloTransposeOptions): + x = StablehloTransposeOptionsT() + x._UnPack(stablehloTransposeOptions) + return x + + # StablehloTransposeOptionsT + def _UnPack(self, stablehloTransposeOptions): + if stablehloTransposeOptions is None: + return + if not stablehloTransposeOptions.PermutationIsNone(): + if np is None: + self.permutation = [] + for i in range(stablehloTransposeOptions.PermutationLength()): + self.permutation.append(stablehloTransposeOptions.Permutation(i)) + else: + self.permutation = stablehloTransposeOptions.PermutationAsNumpy() + + # StablehloTransposeOptionsT + def Pack(self, builder): + if self.permutation is not None: + if np is not None and type(self.permutation) is np.ndarray: + permutation = builder.CreateNumpyVector(self.permutation) + else: + StablehloTransposeOptionsStartPermutationVector(builder, len(self.permutation)) + for i in reversed(range(len(self.permutation))): + builder.PrependInt64(self.permutation[i]) + permutation = builder.EndVector() + StablehloTransposeOptionsStart(builder) + if self.permutation is not None: + StablehloTransposeOptionsAddPermutation(builder, permutation) + stablehloTransposeOptions = StablehloTransposeOptionsEnd(builder) + return stablehloTransposeOptions + + +class StablehloDotGeneralOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloDotGeneralOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloDotGeneralOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloDotGeneralOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloDotGeneralOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloDotGeneralOptions + def LhsBatchingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def LhsBatchingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def RhsBatchingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def LhsContractingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) 
+ return 0 + + # StablehloDotGeneralOptions + def RhsContractingDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloDotGeneralOptions + def PrecisionConfig(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDotGeneralOptions + def PrecisionConfigIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + +def StablehloDotGeneralOptionsStart(builder): + builder.StartObject(5) + +def StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(lhsBatchingDimensions), 0) + +def StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(rhsBatchingDimensions), 0) + +def StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloDotGeneralOptionsAddLhsContractingDimensions(builder, lhsContractingDimensions): + builder.PrependUOffsetTRelativeSlot(2, 
flatbuffers.number_types.UOffsetTFlags.py_type(lhsContractingDimensions), 0) + +def StablehloDotGeneralOptionsStartLhsContractingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloDotGeneralOptionsAddRhsContractingDimensions(builder, rhsContractingDimensions): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsContractingDimensions), 0) + +def StablehloDotGeneralOptionsStartRhsContractingDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0) + +def StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def StablehloDotGeneralOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloDotGeneralOptionsT(object): + + # StablehloDotGeneralOptionsT + def __init__(self): + self.lhsBatchingDimensions = None # type: List[int] + self.rhsBatchingDimensions = None # type: List[int] + self.lhsContractingDimensions = None # type: List[int] + self.rhsContractingDimensions = None # type: List[int] + self.precisionConfig = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloDotGeneralOptions = StablehloDotGeneralOptions() + stablehloDotGeneralOptions.Init(buf, pos) + return cls.InitFromObj(stablehloDotGeneralOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloDotGeneralOptions): + x = StablehloDotGeneralOptionsT() + x._UnPack(stablehloDotGeneralOptions) + return x + + # StablehloDotGeneralOptionsT + def _UnPack(self, stablehloDotGeneralOptions): + if 
stablehloDotGeneralOptions is None: + return + if not stablehloDotGeneralOptions.LhsBatchingDimensionsIsNone(): + if np is None: + self.lhsBatchingDimensions = [] + for i in range(stablehloDotGeneralOptions.LhsBatchingDimensionsLength()): + self.lhsBatchingDimensions.append(stablehloDotGeneralOptions.LhsBatchingDimensions(i)) + else: + self.lhsBatchingDimensions = stablehloDotGeneralOptions.LhsBatchingDimensionsAsNumpy() + if not stablehloDotGeneralOptions.RhsBatchingDimensionsIsNone(): + if np is None: + self.rhsBatchingDimensions = [] + for i in range(stablehloDotGeneralOptions.RhsBatchingDimensionsLength()): + self.rhsBatchingDimensions.append(stablehloDotGeneralOptions.RhsBatchingDimensions(i)) + else: + self.rhsBatchingDimensions = stablehloDotGeneralOptions.RhsBatchingDimensionsAsNumpy() + if not stablehloDotGeneralOptions.LhsContractingDimensionsIsNone(): + if np is None: + self.lhsContractingDimensions = [] + for i in range(stablehloDotGeneralOptions.LhsContractingDimensionsLength()): + self.lhsContractingDimensions.append(stablehloDotGeneralOptions.LhsContractingDimensions(i)) + else: + self.lhsContractingDimensions = stablehloDotGeneralOptions.LhsContractingDimensionsAsNumpy() + if not stablehloDotGeneralOptions.RhsContractingDimensionsIsNone(): + if np is None: + self.rhsContractingDimensions = [] + for i in range(stablehloDotGeneralOptions.RhsContractingDimensionsLength()): + self.rhsContractingDimensions.append(stablehloDotGeneralOptions.RhsContractingDimensions(i)) + else: + self.rhsContractingDimensions = stablehloDotGeneralOptions.RhsContractingDimensionsAsNumpy() + if not stablehloDotGeneralOptions.PrecisionConfigIsNone(): + if np is None: + self.precisionConfig = [] + for i in range(stablehloDotGeneralOptions.PrecisionConfigLength()): + self.precisionConfig.append(stablehloDotGeneralOptions.PrecisionConfig(i)) + else: + self.precisionConfig = stablehloDotGeneralOptions.PrecisionConfigAsNumpy() + + # StablehloDotGeneralOptionsT + def Pack(self, 
builder): + if self.lhsBatchingDimensions is not None: + if np is not None and type(self.lhsBatchingDimensions) is np.ndarray: + lhsBatchingDimensions = builder.CreateNumpyVector(self.lhsBatchingDimensions) + else: + StablehloDotGeneralOptionsStartLhsBatchingDimensionsVector(builder, len(self.lhsBatchingDimensions)) + for i in reversed(range(len(self.lhsBatchingDimensions))): + builder.PrependInt64(self.lhsBatchingDimensions[i]) + lhsBatchingDimensions = builder.EndVector() + if self.rhsBatchingDimensions is not None: + if np is not None and type(self.rhsBatchingDimensions) is np.ndarray: + rhsBatchingDimensions = builder.CreateNumpyVector(self.rhsBatchingDimensions) + else: + StablehloDotGeneralOptionsStartRhsBatchingDimensionsVector(builder, len(self.rhsBatchingDimensions)) + for i in reversed(range(len(self.rhsBatchingDimensions))): + builder.PrependInt64(self.rhsBatchingDimensions[i]) + rhsBatchingDimensions = builder.EndVector() + if self.lhsContractingDimensions is not None: + if np is not None and type(self.lhsContractingDimensions) is np.ndarray: + lhsContractingDimensions = builder.CreateNumpyVector(self.lhsContractingDimensions) + else: + StablehloDotGeneralOptionsStartLhsContractingDimensionsVector(builder, len(self.lhsContractingDimensions)) + for i in reversed(range(len(self.lhsContractingDimensions))): + builder.PrependInt64(self.lhsContractingDimensions[i]) + lhsContractingDimensions = builder.EndVector() + if self.rhsContractingDimensions is not None: + if np is not None and type(self.rhsContractingDimensions) is np.ndarray: + rhsContractingDimensions = builder.CreateNumpyVector(self.rhsContractingDimensions) + else: + StablehloDotGeneralOptionsStartRhsContractingDimensionsVector(builder, len(self.rhsContractingDimensions)) + for i in reversed(range(len(self.rhsContractingDimensions))): + builder.PrependInt64(self.rhsContractingDimensions[i]) + rhsContractingDimensions = builder.EndVector() + if self.precisionConfig is not None: + if np is not None 
and type(self.precisionConfig) is np.ndarray: + precisionConfig = builder.CreateNumpyVector(self.precisionConfig) + else: + StablehloDotGeneralOptionsStartPrecisionConfigVector(builder, len(self.precisionConfig)) + for i in reversed(range(len(self.precisionConfig))): + builder.PrependUint32(self.precisionConfig[i]) + precisionConfig = builder.EndVector() + StablehloDotGeneralOptionsStart(builder) + if self.lhsBatchingDimensions is not None: + StablehloDotGeneralOptionsAddLhsBatchingDimensions(builder, lhsBatchingDimensions) + if self.rhsBatchingDimensions is not None: + StablehloDotGeneralOptionsAddRhsBatchingDimensions(builder, rhsBatchingDimensions) + if self.lhsContractingDimensions is not None: + StablehloDotGeneralOptionsAddLhsContractingDimensions(builder, lhsContractingDimensions) + if self.rhsContractingDimensions is not None: + StablehloDotGeneralOptionsAddRhsContractingDimensions(builder, rhsContractingDimensions) + if self.precisionConfig is not None: + StablehloDotGeneralOptionsAddPrecisionConfig(builder, precisionConfig) + stablehloDotGeneralOptions = StablehloDotGeneralOptionsEnd(builder) + return stablehloDotGeneralOptions + + +class StablehloReduceWindowOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloReduceWindowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloReduceWindowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloReduceWindowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloReduceWindowOptions + def WindowDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def WindowDimensionsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloReduceWindowOptions + def WindowStrides(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def WindowStridesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloReduceWindowOptions + def BaseDilations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def BaseDilationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloReduceWindowOptions + def WindowDilations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloReduceWindowOptions + def WindowDilationsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def WindowDilationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def 
WindowDilationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # StablehloReduceWindowOptions + def Padding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloReduceWindowOptions + def PaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloReduceWindowOptions + def PaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloReduceWindowOptions + def PaddingIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloReduceWindowOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def StablehloReduceWindowOptionsStart(builder): + builder.StartObject(6) + +def StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(windowDimensions), 0) + +def StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0) + +def StablehloReduceWindowOptionsStartWindowStridesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def 
StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(baseDilations), 0) + +def StablehloReduceWindowOptionsStartBaseDilationsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(windowDilations), 0) + +def StablehloReduceWindowOptionsStartWindowDilationsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloReduceWindowOptionsAddPadding(builder, padding): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0) + +def StablehloReduceWindowOptionsStartPaddingVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(5, bodySubgraphIndex, 0) + +def StablehloReduceWindowOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloReduceWindowOptionsT(object): + + # StablehloReduceWindowOptionsT + def __init__(self): + self.windowDimensions = None # type: List[int] + self.windowStrides = None # type: List[int] + self.baseDilations = None # type: List[int] + self.windowDilations = None # type: List[int] + self.padding = None # type: List[int] + self.bodySubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloReduceWindowOptions = StablehloReduceWindowOptions() + stablehloReduceWindowOptions.Init(buf, pos) + return cls.InitFromObj(stablehloReduceWindowOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloReduceWindowOptions): + 
x = StablehloReduceWindowOptionsT() + x._UnPack(stablehloReduceWindowOptions) + return x + + # StablehloReduceWindowOptionsT + def _UnPack(self, stablehloReduceWindowOptions): + if stablehloReduceWindowOptions is None: + return + if not stablehloReduceWindowOptions.WindowDimensionsIsNone(): + if np is None: + self.windowDimensions = [] + for i in range(stablehloReduceWindowOptions.WindowDimensionsLength()): + self.windowDimensions.append(stablehloReduceWindowOptions.WindowDimensions(i)) + else: + self.windowDimensions = stablehloReduceWindowOptions.WindowDimensionsAsNumpy() + if not stablehloReduceWindowOptions.WindowStridesIsNone(): + if np is None: + self.windowStrides = [] + for i in range(stablehloReduceWindowOptions.WindowStridesLength()): + self.windowStrides.append(stablehloReduceWindowOptions.WindowStrides(i)) + else: + self.windowStrides = stablehloReduceWindowOptions.WindowStridesAsNumpy() + if not stablehloReduceWindowOptions.BaseDilationsIsNone(): + if np is None: + self.baseDilations = [] + for i in range(stablehloReduceWindowOptions.BaseDilationsLength()): + self.baseDilations.append(stablehloReduceWindowOptions.BaseDilations(i)) + else: + self.baseDilations = stablehloReduceWindowOptions.BaseDilationsAsNumpy() + if not stablehloReduceWindowOptions.WindowDilationsIsNone(): + if np is None: + self.windowDilations = [] + for i in range(stablehloReduceWindowOptions.WindowDilationsLength()): + self.windowDilations.append(stablehloReduceWindowOptions.WindowDilations(i)) + else: + self.windowDilations = stablehloReduceWindowOptions.WindowDilationsAsNumpy() + if not stablehloReduceWindowOptions.PaddingIsNone(): + if np is None: + self.padding = [] + for i in range(stablehloReduceWindowOptions.PaddingLength()): + self.padding.append(stablehloReduceWindowOptions.Padding(i)) + else: + self.padding = stablehloReduceWindowOptions.PaddingAsNumpy() + self.bodySubgraphIndex = stablehloReduceWindowOptions.BodySubgraphIndex() + + # StablehloReduceWindowOptionsT + def 
Pack(self, builder): + if self.windowDimensions is not None: + if np is not None and type(self.windowDimensions) is np.ndarray: + windowDimensions = builder.CreateNumpyVector(self.windowDimensions) + else: + StablehloReduceWindowOptionsStartWindowDimensionsVector(builder, len(self.windowDimensions)) + for i in reversed(range(len(self.windowDimensions))): + builder.PrependInt64(self.windowDimensions[i]) + windowDimensions = builder.EndVector() + if self.windowStrides is not None: + if np is not None and type(self.windowStrides) is np.ndarray: + windowStrides = builder.CreateNumpyVector(self.windowStrides) + else: + StablehloReduceWindowOptionsStartWindowStridesVector(builder, len(self.windowStrides)) + for i in reversed(range(len(self.windowStrides))): + builder.PrependInt64(self.windowStrides[i]) + windowStrides = builder.EndVector() + if self.baseDilations is not None: + if np is not None and type(self.baseDilations) is np.ndarray: + baseDilations = builder.CreateNumpyVector(self.baseDilations) + else: + StablehloReduceWindowOptionsStartBaseDilationsVector(builder, len(self.baseDilations)) + for i in reversed(range(len(self.baseDilations))): + builder.PrependInt64(self.baseDilations[i]) + baseDilations = builder.EndVector() + if self.windowDilations is not None: + if np is not None and type(self.windowDilations) is np.ndarray: + windowDilations = builder.CreateNumpyVector(self.windowDilations) + else: + StablehloReduceWindowOptionsStartWindowDilationsVector(builder, len(self.windowDilations)) + for i in reversed(range(len(self.windowDilations))): + builder.PrependInt64(self.windowDilations[i]) + windowDilations = builder.EndVector() + if self.padding is not None: + if np is not None and type(self.padding) is np.ndarray: + padding = builder.CreateNumpyVector(self.padding) + else: + StablehloReduceWindowOptionsStartPaddingVector(builder, len(self.padding)) + for i in reversed(range(len(self.padding))): + builder.PrependInt64(self.padding[i]) + padding = 
builder.EndVector() + StablehloReduceWindowOptionsStart(builder) + if self.windowDimensions is not None: + StablehloReduceWindowOptionsAddWindowDimensions(builder, windowDimensions) + if self.windowStrides is not None: + StablehloReduceWindowOptionsAddWindowStrides(builder, windowStrides) + if self.baseDilations is not None: + StablehloReduceWindowOptionsAddBaseDilations(builder, baseDilations) + if self.windowDilations is not None: + StablehloReduceWindowOptionsAddWindowDilations(builder, windowDilations) + if self.padding is not None: + StablehloReduceWindowOptionsAddPadding(builder, padding) + StablehloReduceWindowOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex) + stablehloReduceWindowOptions = StablehloReduceWindowOptionsEnd(builder) + return stablehloReduceWindowOptions + + +class StablehloWhileOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloWhileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloWhileOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloWhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloWhileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloWhileOptions + def CondSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StablehloWhileOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def StablehloWhileOptionsStart(builder): + builder.StartObject(2) + +def StablehloWhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): + builder.PrependInt32Slot(0, condSubgraphIndex, 0) + +def StablehloWhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(1, bodySubgraphIndex, 0) + +def StablehloWhileOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloWhileOptionsT(object): + + # StablehloWhileOptionsT + def __init__(self): + self.condSubgraphIndex = 0 # type: int + self.bodySubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloWhileOptions = StablehloWhileOptions() + stablehloWhileOptions.Init(buf, pos) + return cls.InitFromObj(stablehloWhileOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloWhileOptions): + x = StablehloWhileOptionsT() + x._UnPack(stablehloWhileOptions) + return x + + # StablehloWhileOptionsT + def _UnPack(self, stablehloWhileOptions): + 
if stablehloWhileOptions is None: + return + self.condSubgraphIndex = stablehloWhileOptions.CondSubgraphIndex() + self.bodySubgraphIndex = stablehloWhileOptions.BodySubgraphIndex() + + # StablehloWhileOptionsT + def Pack(self, builder): + StablehloWhileOptionsStart(builder) + StablehloWhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex) + StablehloWhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex) + stablehloWhileOptions = StablehloWhileOptionsEnd(builder) + return stablehloWhileOptions + + +class StablehloSortOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloSortOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloSortOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloSortOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloSortOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloSortOptions + def Dimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloSortOptions + def IsStable(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # StablehloSortOptions + def ComparatorSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def 
StablehloSortOptionsStart(builder): + builder.StartObject(3) + +def StablehloSortOptionsAddDimension(builder, dimension): + builder.PrependInt64Slot(0, dimension, 0) + +def StablehloSortOptionsAddIsStable(builder, isStable): + builder.PrependBoolSlot(1, isStable, 0) + +def StablehloSortOptionsAddComparatorSubgraphIndex(builder, comparatorSubgraphIndex): + builder.PrependInt32Slot(2, comparatorSubgraphIndex, 0) + +def StablehloSortOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloSortOptionsT(object): + + # StablehloSortOptionsT + def __init__(self): + self.dimension = 0 # type: int + self.isStable = False # type: bool + self.comparatorSubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloSortOptions = StablehloSortOptions() + stablehloSortOptions.Init(buf, pos) + return cls.InitFromObj(stablehloSortOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloSortOptions): + x = StablehloSortOptionsT() + x._UnPack(stablehloSortOptions) + return x + + # StablehloSortOptionsT + def _UnPack(self, stablehloSortOptions): + if stablehloSortOptions is None: + return + self.dimension = stablehloSortOptions.Dimension() + self.isStable = stablehloSortOptions.IsStable() + self.comparatorSubgraphIndex = stablehloSortOptions.ComparatorSubgraphIndex() + + # StablehloSortOptionsT + def Pack(self, builder): + StablehloSortOptionsStart(builder) + StablehloSortOptionsAddDimension(builder, self.dimension) + StablehloSortOptionsAddIsStable(builder, self.isStable) + StablehloSortOptionsAddComparatorSubgraphIndex(builder, self.comparatorSubgraphIndex) + stablehloSortOptions = StablehloSortOptionsEnd(builder) + return stablehloSortOptions + + +class StablehloConcatenateOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloConcatenateOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloConcatenateOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloConcatenateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloConcatenateOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloConcatenateOptions + def Dimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + +def StablehloConcatenateOptionsStart(builder): + builder.StartObject(1) + +def StablehloConcatenateOptionsAddDimension(builder, dimension): + builder.PrependInt64Slot(0, dimension, 0) + +def StablehloConcatenateOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloConcatenateOptionsT(object): + + # StablehloConcatenateOptionsT + def __init__(self): + self.dimension = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloConcatenateOptions = StablehloConcatenateOptions() + stablehloConcatenateOptions.Init(buf, pos) + return cls.InitFromObj(stablehloConcatenateOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloConcatenateOptions): + x = StablehloConcatenateOptionsT() + x._UnPack(stablehloConcatenateOptions) + return x + + # StablehloConcatenateOptionsT + def _UnPack(self, stablehloConcatenateOptions): + if stablehloConcatenateOptions is None: + return + self.dimension = 
stablehloConcatenateOptions.Dimension() + + # StablehloConcatenateOptionsT + def Pack(self, builder): + StablehloConcatenateOptionsStart(builder) + StablehloConcatenateOptionsAddDimension(builder, self.dimension) + stablehloConcatenateOptions = StablehloConcatenateOptionsEnd(builder) + return stablehloConcatenateOptions + + +class StablehloBroadcastInDimOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloBroadcastInDimOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloBroadcastInDimOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloBroadcastInDimOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloBroadcastInDimOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloBroadcastInDimOptions + def BroadcastDimensions(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloBroadcastInDimOptions + def BroadcastDimensionsIsNone(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def StablehloBroadcastInDimOptionsStart(builder): + builder.StartObject(1) + +def StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(broadcastDimensions), 0) + +def StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloBroadcastInDimOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloBroadcastInDimOptionsT(object): + + # StablehloBroadcastInDimOptionsT + def __init__(self): + self.broadcastDimensions = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloBroadcastInDimOptions = StablehloBroadcastInDimOptions() + stablehloBroadcastInDimOptions.Init(buf, pos) + return cls.InitFromObj(stablehloBroadcastInDimOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloBroadcastInDimOptions): + x = StablehloBroadcastInDimOptionsT() + x._UnPack(stablehloBroadcastInDimOptions) + return x + + # StablehloBroadcastInDimOptionsT + def _UnPack(self, stablehloBroadcastInDimOptions): + if stablehloBroadcastInDimOptions is None: + return + if not stablehloBroadcastInDimOptions.BroadcastDimensionsIsNone(): + if np is None: + self.broadcastDimensions = [] + for i in range(stablehloBroadcastInDimOptions.BroadcastDimensionsLength()): + self.broadcastDimensions.append(stablehloBroadcastInDimOptions.BroadcastDimensions(i)) + else: + self.broadcastDimensions = stablehloBroadcastInDimOptions.BroadcastDimensionsAsNumpy() + + # StablehloBroadcastInDimOptionsT + def Pack(self, builder): + if self.broadcastDimensions is not None: + if np is not 
None and type(self.broadcastDimensions) is np.ndarray: + broadcastDimensions = builder.CreateNumpyVector(self.broadcastDimensions) + else: + StablehloBroadcastInDimOptionsStartBroadcastDimensionsVector(builder, len(self.broadcastDimensions)) + for i in reversed(range(len(self.broadcastDimensions))): + builder.PrependInt64(self.broadcastDimensions[i]) + broadcastDimensions = builder.EndVector() + StablehloBroadcastInDimOptionsStart(builder) + if self.broadcastDimensions is not None: + StablehloBroadcastInDimOptionsAddBroadcastDimensions(builder, broadcastDimensions) + stablehloBroadcastInDimOptions = StablehloBroadcastInDimOptionsEnd(builder) + return stablehloBroadcastInDimOptions + + +class StablehloCompareOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloCompareOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloCompareOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloCompareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloCompareOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloCompareOptions + def ComparisonDirection(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # StablehloCompareOptions + def CompareType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def StablehloCompareOptionsStart(builder): + builder.StartObject(2) + +def StablehloCompareOptionsAddComparisonDirection(builder, comparisonDirection): + builder.PrependUint32Slot(0, comparisonDirection, 0) + +def StablehloCompareOptionsAddCompareType(builder, compareType): + builder.PrependUint32Slot(1, compareType, 0) + +def StablehloCompareOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloCompareOptionsT(object): + + # StablehloCompareOptionsT + def __init__(self): + self.comparisonDirection = 0 # type: int + self.compareType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloCompareOptions = StablehloCompareOptions() + stablehloCompareOptions.Init(buf, pos) + return cls.InitFromObj(stablehloCompareOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloCompareOptions): + x = StablehloCompareOptionsT() + x._UnPack(stablehloCompareOptions) + return x + + # StablehloCompareOptionsT + def _UnPack(self, 
stablehloCompareOptions): + if stablehloCompareOptions is None: + return + self.comparisonDirection = stablehloCompareOptions.ComparisonDirection() + self.compareType = stablehloCompareOptions.CompareType() + + # StablehloCompareOptionsT + def Pack(self, builder): + StablehloCompareOptionsStart(builder) + StablehloCompareOptionsAddComparisonDirection(builder, self.comparisonDirection) + StablehloCompareOptionsAddCompareType(builder, self.compareType) + stablehloCompareOptions = StablehloCompareOptionsEnd(builder) + return stablehloCompareOptions + + +class StablehloDynamicSliceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloDynamicSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloDynamicSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloDynamicSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloDynamicSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloDynamicSliceOptions + def SliceSizes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloDynamicSliceOptions + def SliceSizesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def StablehloDynamicSliceOptionsStart(builder): + builder.StartObject(1) + +def StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(sliceSizes), 0) + +def StablehloDynamicSliceOptionsStartSliceSizesVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloDynamicSliceOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloDynamicSliceOptionsT(object): + + # StablehloDynamicSliceOptionsT + def __init__(self): + self.sliceSizes = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloDynamicSliceOptions = StablehloDynamicSliceOptions() + stablehloDynamicSliceOptions.Init(buf, pos) + return cls.InitFromObj(stablehloDynamicSliceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloDynamicSliceOptions): + x = StablehloDynamicSliceOptionsT() + x._UnPack(stablehloDynamicSliceOptions) + return x + + # StablehloDynamicSliceOptionsT + def _UnPack(self, stablehloDynamicSliceOptions): + if stablehloDynamicSliceOptions is None: + return + if not stablehloDynamicSliceOptions.SliceSizesIsNone(): + if np is None: + self.sliceSizes = [] + for i in range(stablehloDynamicSliceOptions.SliceSizesLength()): + self.sliceSizes.append(stablehloDynamicSliceOptions.SliceSizes(i)) + else: + self.sliceSizes = stablehloDynamicSliceOptions.SliceSizesAsNumpy() + + # StablehloDynamicSliceOptionsT + def Pack(self, builder): + if 
self.sliceSizes is not None: + if np is not None and type(self.sliceSizes) is np.ndarray: + sliceSizes = builder.CreateNumpyVector(self.sliceSizes) + else: + StablehloDynamicSliceOptionsStartSliceSizesVector(builder, len(self.sliceSizes)) + for i in reversed(range(len(self.sliceSizes))): + builder.PrependInt64(self.sliceSizes[i]) + sliceSizes = builder.EndVector() + StablehloDynamicSliceOptionsStart(builder) + if self.sliceSizes is not None: + StablehloDynamicSliceOptionsAddSliceSizes(builder, sliceSizes) + stablehloDynamicSliceOptions = StablehloDynamicSliceOptionsEnd(builder) + return stablehloDynamicSliceOptions + + +class StablehloPadOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloPadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloPadOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloPadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloPadOptions + def EdgePaddingLow(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloPadOptions + def EdgePaddingLowAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def EdgePaddingLowLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def EdgePaddingLowIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # StablehloPadOptions + def EdgePaddingHigh(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloPadOptions + def EdgePaddingHighAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def EdgePaddingHighLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def EdgePaddingHighIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloPadOptions + def InteriorPadding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloPadOptions + def InteriorPaddingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloPadOptions + def InteriorPaddingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloPadOptions + def InteriorPaddingIsNone(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + +def StablehloPadOptionsStart(builder): + builder.StartObject(3) + +def StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingLow), 0) + +def StablehloPadOptionsStartEdgePaddingLowVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(edgePaddingHigh), 0) + +def StablehloPadOptionsStartEdgePaddingHighVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloPadOptionsAddInteriorPadding(builder, interiorPadding): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(interiorPadding), 0) + +def StablehloPadOptionsStartInteriorPaddingVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloPadOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloPadOptionsT(object): + + # StablehloPadOptionsT + def __init__(self): + self.edgePaddingLow = None # type: List[int] + self.edgePaddingHigh = None # type: List[int] + self.interiorPadding = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloPadOptions = StablehloPadOptions() + stablehloPadOptions.Init(buf, pos) + return cls.InitFromObj(stablehloPadOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloPadOptions): + x = StablehloPadOptionsT() + x._UnPack(stablehloPadOptions) + return x + + # StablehloPadOptionsT + def _UnPack(self, stablehloPadOptions): + if stablehloPadOptions is None: + return + if not 
stablehloPadOptions.EdgePaddingLowIsNone(): + if np is None: + self.edgePaddingLow = [] + for i in range(stablehloPadOptions.EdgePaddingLowLength()): + self.edgePaddingLow.append(stablehloPadOptions.EdgePaddingLow(i)) + else: + self.edgePaddingLow = stablehloPadOptions.EdgePaddingLowAsNumpy() + if not stablehloPadOptions.EdgePaddingHighIsNone(): + if np is None: + self.edgePaddingHigh = [] + for i in range(stablehloPadOptions.EdgePaddingHighLength()): + self.edgePaddingHigh.append(stablehloPadOptions.EdgePaddingHigh(i)) + else: + self.edgePaddingHigh = stablehloPadOptions.EdgePaddingHighAsNumpy() + if not stablehloPadOptions.InteriorPaddingIsNone(): + if np is None: + self.interiorPadding = [] + for i in range(stablehloPadOptions.InteriorPaddingLength()): + self.interiorPadding.append(stablehloPadOptions.InteriorPadding(i)) + else: + self.interiorPadding = stablehloPadOptions.InteriorPaddingAsNumpy() + + # StablehloPadOptionsT + def Pack(self, builder): + if self.edgePaddingLow is not None: + if np is not None and type(self.edgePaddingLow) is np.ndarray: + edgePaddingLow = builder.CreateNumpyVector(self.edgePaddingLow) + else: + StablehloPadOptionsStartEdgePaddingLowVector(builder, len(self.edgePaddingLow)) + for i in reversed(range(len(self.edgePaddingLow))): + builder.PrependInt64(self.edgePaddingLow[i]) + edgePaddingLow = builder.EndVector() + if self.edgePaddingHigh is not None: + if np is not None and type(self.edgePaddingHigh) is np.ndarray: + edgePaddingHigh = builder.CreateNumpyVector(self.edgePaddingHigh) + else: + StablehloPadOptionsStartEdgePaddingHighVector(builder, len(self.edgePaddingHigh)) + for i in reversed(range(len(self.edgePaddingHigh))): + builder.PrependInt64(self.edgePaddingHigh[i]) + edgePaddingHigh = builder.EndVector() + if self.interiorPadding is not None: + if np is not None and type(self.interiorPadding) is np.ndarray: + interiorPadding = builder.CreateNumpyVector(self.interiorPadding) + else: + 
StablehloPadOptionsStartInteriorPaddingVector(builder, len(self.interiorPadding)) + for i in reversed(range(len(self.interiorPadding))): + builder.PrependInt64(self.interiorPadding[i]) + interiorPadding = builder.EndVector() + StablehloPadOptionsStart(builder) + if self.edgePaddingLow is not None: + StablehloPadOptionsAddEdgePaddingLow(builder, edgePaddingLow) + if self.edgePaddingHigh is not None: + StablehloPadOptionsAddEdgePaddingHigh(builder, edgePaddingHigh) + if self.interiorPadding is not None: + StablehloPadOptionsAddInteriorPadding(builder, interiorPadding) + stablehloPadOptions = StablehloPadOptionsEnd(builder) + return stablehloPadOptions + + +class StablehloIotaOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloIotaOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloIotaOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloIotaOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloIotaOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloIotaOptions + def IotaDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + +def StablehloIotaOptionsStart(builder): + builder.StartObject(1) + +def StablehloIotaOptionsAddIotaDimension(builder, iotaDimension): + builder.PrependInt64Slot(0, iotaDimension, 0) + +def StablehloIotaOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloIotaOptionsT(object): + + # StablehloIotaOptionsT + def __init__(self): + self.iotaDimension = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloIotaOptions = StablehloIotaOptions() + stablehloIotaOptions.Init(buf, pos) + return cls.InitFromObj(stablehloIotaOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloIotaOptions): + x = StablehloIotaOptionsT() + x._UnPack(stablehloIotaOptions) + return x + + # StablehloIotaOptionsT + def _UnPack(self, stablehloIotaOptions): + if stablehloIotaOptions is None: + return + self.iotaDimension = stablehloIotaOptions.IotaDimension() + + # StablehloIotaOptionsT + def Pack(self, builder): + StablehloIotaOptionsStart(builder) + StablehloIotaOptionsAddIotaDimension(builder, self.iotaDimension) + stablehloIotaOptions = StablehloIotaOptionsEnd(builder) + return stablehloIotaOptions + + +class StablehloCustomCallOptions(object): + __slots__ = ['_tab'] + + @classmethod + def 
GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloCustomCallOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloCustomCallOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloCustomCallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloCustomCallOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloCustomCallOptions + def CallTargetName(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # StablehloCustomCallOptions + def HasSideEffect(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # StablehloCustomCallOptions + def BackendConfig(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # StablehloCustomCallOptions + def ApiVersion(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StablehloCustomCallOptions + def CalledComputations(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsAsNumpy(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloCustomCallOptions + def CalledComputationsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # StablehloCustomCallOptions + def CustomAttributes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloCustomCallOptions + def CustomAttributesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + +def StablehloCustomCallOptionsStart(builder): + builder.StartObject(6) + +def StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(callTargetName), 0) + +def StablehloCustomCallOptionsAddHasSideEffect(builder, hasSideEffect): + builder.PrependBoolSlot(1, hasSideEffect, 0) + +def StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig): + builder.PrependUOffsetTRelativeSlot(2, 
flatbuffers.number_types.UOffsetTFlags.py_type(backendConfig), 0) + +def StablehloCustomCallOptionsAddApiVersion(builder, apiVersion): + builder.PrependInt32Slot(3, apiVersion, 0) + +def StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(calledComputations), 0) + +def StablehloCustomCallOptionsStartCalledComputationsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes): + builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customAttributes), 0) + +def StablehloCustomCallOptionsStartCustomAttributesVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + +def StablehloCustomCallOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloCustomCallOptionsT(object): + + # StablehloCustomCallOptionsT + def __init__(self): + self.callTargetName = None # type: str + self.hasSideEffect = False # type: bool + self.backendConfig = None # type: str + self.apiVersion = 0 # type: int + self.calledComputations = None # type: List[int] + self.customAttributes = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloCustomCallOptions = StablehloCustomCallOptions() + stablehloCustomCallOptions.Init(buf, pos) + return cls.InitFromObj(stablehloCustomCallOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloCustomCallOptions): + x = StablehloCustomCallOptionsT() + x._UnPack(stablehloCustomCallOptions) + return x + + # StablehloCustomCallOptionsT + def _UnPack(self, stablehloCustomCallOptions): + if stablehloCustomCallOptions is None: + return + self.callTargetName = 
# NOTE(review): flatc-generated code; comments only. The first line is the
# continuation of StablehloCustomCallOptionsT._UnPack from the previous chunk.
        stablehloCustomCallOptions.CallTargetName()
        self.hasSideEffect = stablehloCustomCallOptions.HasSideEffect()
        self.backendConfig = stablehloCustomCallOptions.BackendConfig()
        self.apiVersion = stablehloCustomCallOptions.ApiVersion()
        if not stablehloCustomCallOptions.CalledComputationsIsNone():
            if np is None:
                # numpy unavailable: copy element-wise into a plain list.
                self.calledComputations = []
                for i in range(stablehloCustomCallOptions.CalledComputationsLength()):
                    self.calledComputations.append(stablehloCustomCallOptions.CalledComputations(i))
            else:
                self.calledComputations = stablehloCustomCallOptions.CalledComputationsAsNumpy()
        if not stablehloCustomCallOptions.CustomAttributesIsNone():
            if np is None:
                self.customAttributes = []
                for i in range(stablehloCustomCallOptions.CustomAttributesLength()):
                    self.customAttributes.append(stablehloCustomCallOptions.CustomAttributes(i))
            else:
                self.customAttributes = stablehloCustomCallOptions.CustomAttributesAsNumpy()

    # StablehloCustomCallOptionsT
    def Pack(self, builder):
        # Strings and vectors must be serialized before StartObject(); vector
        # elements are prepended in reverse so they end up in declared order.
        if self.callTargetName is not None:
            callTargetName = builder.CreateString(self.callTargetName)
        if self.backendConfig is not None:
            backendConfig = builder.CreateString(self.backendConfig)
        if self.calledComputations is not None:
            if np is not None and type(self.calledComputations) is np.ndarray:
                calledComputations = builder.CreateNumpyVector(self.calledComputations)
            else:
                StablehloCustomCallOptionsStartCalledComputationsVector(builder, len(self.calledComputations))
                for i in reversed(range(len(self.calledComputations))):
                    builder.PrependInt32(self.calledComputations[i])
                calledComputations = builder.EndVector()
        if self.customAttributes is not None:
            if np is not None and type(self.customAttributes) is np.ndarray:
                customAttributes = builder.CreateNumpyVector(self.customAttributes)
            else:
                StablehloCustomCallOptionsStartCustomAttributesVector(builder, len(self.customAttributes))
                for i in reversed(range(len(self.customAttributes))):
                    builder.PrependUint8(self.customAttributes[i])
                customAttributes = builder.EndVector()
        StablehloCustomCallOptionsStart(builder)
        if self.callTargetName is not None:
            StablehloCustomCallOptionsAddCallTargetName(builder, callTargetName)
        StablehloCustomCallOptionsAddHasSideEffect(builder, self.hasSideEffect)
        if self.backendConfig is not None:
            StablehloCustomCallOptionsAddBackendConfig(builder, backendConfig)
        StablehloCustomCallOptionsAddApiVersion(builder, self.apiVersion)
        if self.calledComputations is not None:
            StablehloCustomCallOptionsAddCalledComputations(builder, calledComputations)
        if self.customAttributes is not None:
            StablehloCustomCallOptionsAddCustomAttributes(builder, customAttributes)
        stablehloCustomCallOptions = StablehloCustomCallOptionsEnd(builder)
        return stablehloCustomCallOptions


class StablehloReduceOptions(object):
    """Read-only FlatBuffers view of table StablehloReduceOptions."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset stored at the front of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloReduceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloReduceOptions(cls, buf, offset=0):
        """This method is deprecated. 
# NOTE(review): flatc-generated code; comments only. The first line closes the
# deprecation docstring cut at the previous chunk boundary.
        Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloReduceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # Checks the 4-byte "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloReduceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloReduceOptions
    def Dimensions(self, j):
        # j-th element of the int64 dimensions vector (vtable slot 4).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloReduceOptions
    def DimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloReduceOptions
    def DimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloReduceOptions
    def DimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloReduceOptions
    def BodySubgraphIndex(self):
        # Index of the reducer body subgraph; defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

# Builder helpers: table StablehloReduceOptions declares 2 fields (slots 0-1).
def StablehloReduceOptionsStart(builder):
    builder.StartObject(2)

def StablehloReduceOptionsAddDimensions(builder, dimensions):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dimensions), 0)

def StablehloReduceOptionsStartDimensionsVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloReduceOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex):
    builder.PrependInt32Slot(1, bodySubgraphIndex, 0)

def StablehloReduceOptionsEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class StablehloReduceOptionsT(object):
    """Mutable object-API mirror of StablehloReduceOptions."""

    # StablehloReduceOptionsT
    def __init__(self):
        self.dimensions = None  # type: List[int]
        self.bodySubgraphIndex = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        stablehloReduceOptions = StablehloReduceOptions()
        stablehloReduceOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloReduceOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # Root-offset-prefixed buffer: resolve the root table position first.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloReduceOptions):
        x = StablehloReduceOptionsT()
        x._UnPack(stablehloReduceOptions)
        return x

    # StablehloReduceOptionsT
    def _UnPack(self, stablehloReduceOptions):
        if stablehloReduceOptions is None:
            return
        if not stablehloReduceOptions.DimensionsIsNone():
            if np is None:
                # numpy unavailable: copy element-wise into a plain list.
                self.dimensions = []
                for i in range(stablehloReduceOptions.DimensionsLength()):
                    self.dimensions.append(stablehloReduceOptions.Dimensions(i))
            else:
                self.dimensions = stablehloReduceOptions.DimensionsAsNumpy()
        self.bodySubgraphIndex = stablehloReduceOptions.BodySubgraphIndex()

    # StablehloReduceOptionsT
    def Pack(self, builder):
        # Vectors are serialized before StartObject(); elements are prepended
        # in reverse so they end up in declared order.
        if self.dimensions is not None:
            if np is not None and type(self.dimensions) is np.ndarray:
                dimensions = builder.CreateNumpyVector(self.dimensions)
            else:
                StablehloReduceOptionsStartDimensionsVector(builder, len(self.dimensions))
                for i in reversed(range(len(self.dimensions))):
                    builder.PrependInt64(self.dimensions[i])
                dimensions = builder.EndVector()
        StablehloReduceOptionsStart(builder)
        if self.dimensions is not None:
            StablehloReduceOptionsAddDimensions(builder, dimensions)
        StablehloReduceOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex)
        stablehloReduceOptions = StablehloReduceOptionsEnd(builder)
        return stablehloReduceOptions
# NOTE(review): flatc-generated code; comments only.
class StablehloSliceOptions(object):
    """Read-only FlatBuffers view of table StablehloSliceOptions."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset stored at the front of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloSliceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloSliceOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # Checks the 4-byte "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloSliceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloSliceOptions
    def StartIndices(self, j):
        # j-th element of the int64 start_indices vector (vtable slot 4).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def StartIndicesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def StartIndicesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def StartIndicesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloSliceOptions
    def LimitIndices(self, j):
        # j-th element of the int64 limit_indices vector (vtable slot 6).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def LimitIndicesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def LimitIndicesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def LimitIndicesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # StablehloSliceOptions
    def Strides(self, j):
        # j-th element of the int64 strides vector (vtable slot 8).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloSliceOptions
    def StridesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloSliceOptions
    def StridesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloSliceOptions
    def StridesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

# Builder helpers: table StablehloSliceOptions declares 3 fields (slots 0-2).
def StablehloSliceOptionsStart(builder):
    builder.StartObject(3)

def StablehloSliceOptionsAddStartIndices(builder, startIndices):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(startIndices), 0)

def StablehloSliceOptionsStartStartIndicesVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsAddLimitIndices(builder, limitIndices):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(limitIndices), 0)

def StablehloSliceOptionsStartLimitIndicesVector(builder, numElems):
    # (call continues in the next chunk of this flattened file)
    return 
# NOTE(review): flatc-generated code; comments only. The first line is the
# continuation of StablehloSliceOptionsStartLimitIndicesVector from the
# previous chunk.
    builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsAddStrides(builder, strides):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(strides), 0)

def StablehloSliceOptionsStartStridesVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloSliceOptionsEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class StablehloSliceOptionsT(object):
    """Mutable object-API mirror of StablehloSliceOptions."""

    # StablehloSliceOptionsT
    def __init__(self):
        self.startIndices = None  # type: List[int]
        self.limitIndices = None  # type: List[int]
        self.strides = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        stablehloSliceOptions = StablehloSliceOptions()
        stablehloSliceOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloSliceOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # Root-offset-prefixed buffer: resolve the root table position first.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloSliceOptions):
        x = StablehloSliceOptionsT()
        x._UnPack(stablehloSliceOptions)
        return x

    # StablehloSliceOptionsT
    def _UnPack(self, stablehloSliceOptions):
        if stablehloSliceOptions is None:
            return
        if not stablehloSliceOptions.StartIndicesIsNone():
            if np is None:
                # numpy unavailable: copy element-wise into a plain list.
                self.startIndices = []
                for i in range(stablehloSliceOptions.StartIndicesLength()):
                    self.startIndices.append(stablehloSliceOptions.StartIndices(i))
            else:
                self.startIndices = stablehloSliceOptions.StartIndicesAsNumpy()
        if not stablehloSliceOptions.LimitIndicesIsNone():
            if np is None:
                self.limitIndices = []
                for i in range(stablehloSliceOptions.LimitIndicesLength()):
                    self.limitIndices.append(stablehloSliceOptions.LimitIndices(i))
            else:
                self.limitIndices = stablehloSliceOptions.LimitIndicesAsNumpy()
        if not stablehloSliceOptions.StridesIsNone():
            if np is None:
                self.strides = []
                for i in range(stablehloSliceOptions.StridesLength()):
                    self.strides.append(stablehloSliceOptions.Strides(i))
            else:
                self.strides = stablehloSliceOptions.StridesAsNumpy()

    # StablehloSliceOptionsT
    def Pack(self, builder):
        # Vectors are serialized before StartObject(); elements are prepended
        # in reverse so they end up in declared order.
        if self.startIndices is not None:
            if np is not None and type(self.startIndices) is np.ndarray:
                startIndices = builder.CreateNumpyVector(self.startIndices)
            else:
                StablehloSliceOptionsStartStartIndicesVector(builder, len(self.startIndices))
                for i in reversed(range(len(self.startIndices))):
                    builder.PrependInt64(self.startIndices[i])
                startIndices = builder.EndVector()
        if self.limitIndices is not None:
            if np is not None and type(self.limitIndices) is np.ndarray:
                limitIndices = builder.CreateNumpyVector(self.limitIndices)
            else:
                StablehloSliceOptionsStartLimitIndicesVector(builder, len(self.limitIndices))
                for i in reversed(range(len(self.limitIndices))):
                    builder.PrependInt64(self.limitIndices[i])
                limitIndices = builder.EndVector()
        if self.strides is not None:
            if np is not None and type(self.strides) is np.ndarray:
                strides = builder.CreateNumpyVector(self.strides)
            else:
                StablehloSliceOptionsStartStridesVector(builder, len(self.strides))
                for i in reversed(range(len(self.strides))):
                    builder.PrependInt64(self.strides[i])
                strides = builder.EndVector()
        StablehloSliceOptionsStart(builder)
        if self.startIndices is not None:
            StablehloSliceOptionsAddStartIndices(builder, startIndices)
        if self.limitIndices is not None:
            StablehloSliceOptionsAddLimitIndices(builder, limitIndices)
        if self.strides is not None:
            StablehloSliceOptionsAddStrides(builder, strides)
        stablehloSliceOptions = StablehloSliceOptionsEnd(builder)
        return stablehloSliceOptions


class StablehloConvolutionOptions(object):
    """Read-only FlatBuffers view of table StablehloConvolutionOptions."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset stored at the front of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloConvolutionOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    # (def line continues in the next chunk of this flattened file)
    def 
# NOTE(review): flatc-generated code; comments only. The first line completes
# the `def` cut at the previous chunk boundary.
    GetRootAsStablehloConvolutionOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloConvolutionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # Checks the 4-byte "TFL3" file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloConvolutionOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StablehloConvolutionOptions
    def WindowStrides(self, j):
        # j-th element of the int64 window_strides vector (vtable slot 4).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def WindowStridesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def WindowStridesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def WindowStridesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # StablehloConvolutionOptions
    def Padding(self, j):
        # j-th element of the int64 padding vector (vtable slot 6).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def PaddingAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def PaddingLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def PaddingIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # StablehloConvolutionOptions
    def LhsDilation(self, j):
        # j-th element of the int64 lhs_dilation vector (vtable slot 8).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def LhsDilationAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def LhsDilationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def LhsDilationIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # StablehloConvolutionOptions
    def RhsDilation(self, j):
        # j-th element of the int64 rhs_dilation vector (vtable slot 10).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def RhsDilationAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def RhsDilationLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def RhsDilationIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # StablehloConvolutionOptions
    def WindowReversal(self, j):
        # j-th element of the bool window_reversal vector (vtable slot 12).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # StablehloConvolutionOptions
    def WindowReversalAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
        return 0

    # StablehloConvolutionOptions
    def WindowReversalLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def WindowReversalIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        return o == 0

    # StablehloConvolutionOptions
    def InputBatchDimension(self):
        # Scalar int64 (vtable slot 14); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def InputFeatureDimension(self):
        # Scalar int64 (vtable slot 16); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensions(self, j):
        # j-th element of the int64 input_spatial_dimensions vector (slot 18).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            # (return value continues in the next chunk of this flattened file)
            return 
# NOTE(review): flatc-generated code; comments only. The first line completes
# InputSpatialDimensionsAsNumpy() cut at the previous chunk boundary.
            self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def InputSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        return o == 0

    # StablehloConvolutionOptions
    def KernelInputFeatureDimension(self):
        # Scalar int64 (vtable slot 20); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def KernelOutputFeatureDimension(self):
        # Scalar int64 (vtable slot 22); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensions(self, j):
        # j-th element of the int64 kernel_spatial_dimensions vector (slot 24).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def KernelSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        return o == 0

    # StablehloConvolutionOptions
    def OutputBatchDimension(self):
        # Scalar int64 (vtable slot 26); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def OutputFeatureDimension(self):
        # Scalar int64 (vtable slot 28); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensions(self, j):
        # j-th element of the int64 output_spatial_dimensions vector (slot 30).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def OutputSpatialDimensionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        return o == 0

    # StablehloConvolutionOptions
    def FeatureGroupCount(self):
        # Scalar int64 (vtable slot 32); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def BatchGroupCount(self):
        # Scalar int64 (vtable slot 34); defaults to 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfig(self, j):
        # j-th element of the uint32 precision_config enum vector (slot 36).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StablehloConvolutionOptions
    def PrecisionConfigIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
        return o == 0

# Builder helpers: table StablehloConvolutionOptions declares 17 fields
# (slots 0-16); slot ordering must match schema.fbs field order.
def StablehloConvolutionOptionsStart(builder):
    builder.StartObject(17)

def StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(windowStrides), 0)

def StablehloConvolutionOptionsStartWindowStridesVector(builder, numElems):
    # int64 elements: 8-byte size, 8-byte alignment.
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddPadding(builder, padding):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)

def StablehloConvolutionOptionsStartPaddingVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(lhsDilation), 0)

def StablehloConvolutionOptionsStartLhsDilationVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation):
    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(rhsDilation), 0)

def StablehloConvolutionOptionsStartRhsDilationVector(builder, numElems):
    # (call continues in the next chunk of this flattened file)
    return 
# NOTE(review): flatc-generated code; comments only. The first line is the
# continuation of StablehloConvolutionOptionsStartRhsDilationVector from the
# previous chunk.
    builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(windowReversal), 0)

def StablehloConvolutionOptionsStartWindowReversalVector(builder, numElems):
    # bool elements: 1-byte size, 1-byte alignment.
    return builder.StartVector(1, numElems, 1)

def StablehloConvolutionOptionsAddInputBatchDimension(builder, inputBatchDimension):
    builder.PrependInt64Slot(5, inputBatchDimension, 0)

def StablehloConvolutionOptionsAddInputFeatureDimension(builder, inputFeatureDimension):
    builder.PrependInt64Slot(6, inputFeatureDimension, 0)

def StablehloConvolutionOptionsAddInputSpatialDimensions(builder, inputSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(inputSpatialDimensions), 0)

def StablehloConvolutionOptionsStartInputSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddKernelInputFeatureDimension(builder, kernelInputFeatureDimension):
    builder.PrependInt64Slot(8, kernelInputFeatureDimension, 0)

def StablehloConvolutionOptionsAddKernelOutputFeatureDimension(builder, kernelOutputFeatureDimension):
    builder.PrependInt64Slot(9, kernelOutputFeatureDimension, 0)

def StablehloConvolutionOptionsAddKernelSpatialDimensions(builder, kernelSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(kernelSpatialDimensions), 0)

def StablehloConvolutionOptionsStartKernelSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddOutputBatchDimension(builder, outputBatchDimension):
    builder.PrependInt64Slot(11, outputBatchDimension, 0)

def StablehloConvolutionOptionsAddOutputFeatureDimension(builder, outputFeatureDimension):
    builder.PrependInt64Slot(12, outputFeatureDimension, 0)

def StablehloConvolutionOptionsAddOutputSpatialDimensions(builder, outputSpatialDimensions):
    builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(outputSpatialDimensions), 0)

def StablehloConvolutionOptionsStartOutputSpatialDimensionsVector(builder, numElems):
    return builder.StartVector(8, numElems, 8)

def StablehloConvolutionOptionsAddFeatureGroupCount(builder, featureGroupCount):
    builder.PrependInt64Slot(14, featureGroupCount, 0)

def StablehloConvolutionOptionsAddBatchGroupCount(builder, batchGroupCount):
    builder.PrependInt64Slot(15, batchGroupCount, 0)

def StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig):
    builder.PrependUOffsetTRelativeSlot(16, flatbuffers.number_types.UOffsetTFlags.py_type(precisionConfig), 0)

def StablehloConvolutionOptionsStartPrecisionConfigVector(builder, numElems):
    # uint32 enum elements: 4-byte size, 4-byte alignment.
    return builder.StartVector(4, numElems, 4)

def StablehloConvolutionOptionsEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class StablehloConvolutionOptionsT(object):
    """Mutable object-API mirror of StablehloConvolutionOptions."""

    # StablehloConvolutionOptionsT
    def __init__(self):
        self.windowStrides = None  # type: List[int]
        self.padding = None  # type: List[int]
        self.lhsDilation = None  # type: List[int]
        self.rhsDilation = None  # type: List[int]
        self.windowReversal = None  # type: List[bool]
        self.inputBatchDimension = 0  # type: int
        self.inputFeatureDimension = 0  # type: int
        self.inputSpatialDimensions = None  # type: List[int]
        self.kernelInputFeatureDimension = 0  # type: int
        self.kernelOutputFeatureDimension = 0  # type: int
        self.kernelSpatialDimensions = None  # type: List[int]
        self.outputBatchDimension = 0  # type: int
        self.outputFeatureDimension = 0  # type: int
        self.outputSpatialDimensions = None  # type: List[int]
        self.featureGroupCount = 0  # type: int
        self.batchGroupCount = 0  # type: int
        self.precisionConfig = None  # type: List[int]

    @classmethod
    # (def line continues in the next chunk of this flattened file)
    def InitFromBuf(cls, 
buf, pos): + stablehloConvolutionOptions = StablehloConvolutionOptions() + stablehloConvolutionOptions.Init(buf, pos) + return cls.InitFromObj(stablehloConvolutionOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloConvolutionOptions): + x = StablehloConvolutionOptionsT() + x._UnPack(stablehloConvolutionOptions) + return x + + # StablehloConvolutionOptionsT + def _UnPack(self, stablehloConvolutionOptions): + if stablehloConvolutionOptions is None: + return + if not stablehloConvolutionOptions.WindowStridesIsNone(): + if np is None: + self.windowStrides = [] + for i in range(stablehloConvolutionOptions.WindowStridesLength()): + self.windowStrides.append(stablehloConvolutionOptions.WindowStrides(i)) + else: + self.windowStrides = stablehloConvolutionOptions.WindowStridesAsNumpy() + if not stablehloConvolutionOptions.PaddingIsNone(): + if np is None: + self.padding = [] + for i in range(stablehloConvolutionOptions.PaddingLength()): + self.padding.append(stablehloConvolutionOptions.Padding(i)) + else: + self.padding = stablehloConvolutionOptions.PaddingAsNumpy() + if not stablehloConvolutionOptions.LhsDilationIsNone(): + if np is None: + self.lhsDilation = [] + for i in range(stablehloConvolutionOptions.LhsDilationLength()): + self.lhsDilation.append(stablehloConvolutionOptions.LhsDilation(i)) + else: + self.lhsDilation = stablehloConvolutionOptions.LhsDilationAsNumpy() + if not stablehloConvolutionOptions.RhsDilationIsNone(): + if np is None: + self.rhsDilation = [] + for i in range(stablehloConvolutionOptions.RhsDilationLength()): + self.rhsDilation.append(stablehloConvolutionOptions.RhsDilation(i)) + else: + self.rhsDilation = stablehloConvolutionOptions.RhsDilationAsNumpy() + if not stablehloConvolutionOptions.WindowReversalIsNone(): + if np is None: + self.windowReversal = [] + for i in 
range(stablehloConvolutionOptions.WindowReversalLength()): + self.windowReversal.append(stablehloConvolutionOptions.WindowReversal(i)) + else: + self.windowReversal = stablehloConvolutionOptions.WindowReversalAsNumpy() + self.inputBatchDimension = stablehloConvolutionOptions.InputBatchDimension() + self.inputFeatureDimension = stablehloConvolutionOptions.InputFeatureDimension() + if not stablehloConvolutionOptions.InputSpatialDimensionsIsNone(): + if np is None: + self.inputSpatialDimensions = [] + for i in range(stablehloConvolutionOptions.InputSpatialDimensionsLength()): + self.inputSpatialDimensions.append(stablehloConvolutionOptions.InputSpatialDimensions(i)) + else: + self.inputSpatialDimensions = stablehloConvolutionOptions.InputSpatialDimensionsAsNumpy() + self.kernelInputFeatureDimension = stablehloConvolutionOptions.KernelInputFeatureDimension() + self.kernelOutputFeatureDimension = stablehloConvolutionOptions.KernelOutputFeatureDimension() + if not stablehloConvolutionOptions.KernelSpatialDimensionsIsNone(): + if np is None: + self.kernelSpatialDimensions = [] + for i in range(stablehloConvolutionOptions.KernelSpatialDimensionsLength()): + self.kernelSpatialDimensions.append(stablehloConvolutionOptions.KernelSpatialDimensions(i)) + else: + self.kernelSpatialDimensions = stablehloConvolutionOptions.KernelSpatialDimensionsAsNumpy() + self.outputBatchDimension = stablehloConvolutionOptions.OutputBatchDimension() + self.outputFeatureDimension = stablehloConvolutionOptions.OutputFeatureDimension() + if not stablehloConvolutionOptions.OutputSpatialDimensionsIsNone(): + if np is None: + self.outputSpatialDimensions = [] + for i in range(stablehloConvolutionOptions.OutputSpatialDimensionsLength()): + self.outputSpatialDimensions.append(stablehloConvolutionOptions.OutputSpatialDimensions(i)) + else: + self.outputSpatialDimensions = stablehloConvolutionOptions.OutputSpatialDimensionsAsNumpy() + self.featureGroupCount = 
stablehloConvolutionOptions.FeatureGroupCount() + self.batchGroupCount = stablehloConvolutionOptions.BatchGroupCount() + if not stablehloConvolutionOptions.PrecisionConfigIsNone(): + if np is None: + self.precisionConfig = [] + for i in range(stablehloConvolutionOptions.PrecisionConfigLength()): + self.precisionConfig.append(stablehloConvolutionOptions.PrecisionConfig(i)) + else: + self.precisionConfig = stablehloConvolutionOptions.PrecisionConfigAsNumpy() + + # StablehloConvolutionOptionsT + def Pack(self, builder): + if self.windowStrides is not None: + if np is not None and type(self.windowStrides) is np.ndarray: + windowStrides = builder.CreateNumpyVector(self.windowStrides) + else: + StablehloConvolutionOptionsStartWindowStridesVector(builder, len(self.windowStrides)) + for i in reversed(range(len(self.windowStrides))): + builder.PrependInt64(self.windowStrides[i]) + windowStrides = builder.EndVector() + if self.padding is not None: + if np is not None and type(self.padding) is np.ndarray: + padding = builder.CreateNumpyVector(self.padding) + else: + StablehloConvolutionOptionsStartPaddingVector(builder, len(self.padding)) + for i in reversed(range(len(self.padding))): + builder.PrependInt64(self.padding[i]) + padding = builder.EndVector() + if self.lhsDilation is not None: + if np is not None and type(self.lhsDilation) is np.ndarray: + lhsDilation = builder.CreateNumpyVector(self.lhsDilation) + else: + StablehloConvolutionOptionsStartLhsDilationVector(builder, len(self.lhsDilation)) + for i in reversed(range(len(self.lhsDilation))): + builder.PrependInt64(self.lhsDilation[i]) + lhsDilation = builder.EndVector() + if self.rhsDilation is not None: + if np is not None and type(self.rhsDilation) is np.ndarray: + rhsDilation = builder.CreateNumpyVector(self.rhsDilation) + else: + StablehloConvolutionOptionsStartRhsDilationVector(builder, len(self.rhsDilation)) + for i in reversed(range(len(self.rhsDilation))): + builder.PrependInt64(self.rhsDilation[i]) + 
rhsDilation = builder.EndVector() + if self.windowReversal is not None: + if np is not None and type(self.windowReversal) is np.ndarray: + windowReversal = builder.CreateNumpyVector(self.windowReversal) + else: + StablehloConvolutionOptionsStartWindowReversalVector(builder, len(self.windowReversal)) + for i in reversed(range(len(self.windowReversal))): + builder.PrependBool(self.windowReversal[i]) + windowReversal = builder.EndVector() + if self.inputSpatialDimensions is not None: + if np is not None and type(self.inputSpatialDimensions) is np.ndarray: + inputSpatialDimensions = builder.CreateNumpyVector(self.inputSpatialDimensions) + else: + StablehloConvolutionOptionsStartInputSpatialDimensionsVector(builder, len(self.inputSpatialDimensions)) + for i in reversed(range(len(self.inputSpatialDimensions))): + builder.PrependInt64(self.inputSpatialDimensions[i]) + inputSpatialDimensions = builder.EndVector() + if self.kernelSpatialDimensions is not None: + if np is not None and type(self.kernelSpatialDimensions) is np.ndarray: + kernelSpatialDimensions = builder.CreateNumpyVector(self.kernelSpatialDimensions) + else: + StablehloConvolutionOptionsStartKernelSpatialDimensionsVector(builder, len(self.kernelSpatialDimensions)) + for i in reversed(range(len(self.kernelSpatialDimensions))): + builder.PrependInt64(self.kernelSpatialDimensions[i]) + kernelSpatialDimensions = builder.EndVector() + if self.outputSpatialDimensions is not None: + if np is not None and type(self.outputSpatialDimensions) is np.ndarray: + outputSpatialDimensions = builder.CreateNumpyVector(self.outputSpatialDimensions) + else: + StablehloConvolutionOptionsStartOutputSpatialDimensionsVector(builder, len(self.outputSpatialDimensions)) + for i in reversed(range(len(self.outputSpatialDimensions))): + builder.PrependInt64(self.outputSpatialDimensions[i]) + outputSpatialDimensions = builder.EndVector() + if self.precisionConfig is not None: + if np is not None and type(self.precisionConfig) is np.ndarray: 
+ precisionConfig = builder.CreateNumpyVector(self.precisionConfig) + else: + StablehloConvolutionOptionsStartPrecisionConfigVector(builder, len(self.precisionConfig)) + for i in reversed(range(len(self.precisionConfig))): + builder.PrependUint32(self.precisionConfig[i]) + precisionConfig = builder.EndVector() + StablehloConvolutionOptionsStart(builder) + if self.windowStrides is not None: + StablehloConvolutionOptionsAddWindowStrides(builder, windowStrides) + if self.padding is not None: + StablehloConvolutionOptionsAddPadding(builder, padding) + if self.lhsDilation is not None: + StablehloConvolutionOptionsAddLhsDilation(builder, lhsDilation) + if self.rhsDilation is not None: + StablehloConvolutionOptionsAddRhsDilation(builder, rhsDilation) + if self.windowReversal is not None: + StablehloConvolutionOptionsAddWindowReversal(builder, windowReversal) + StablehloConvolutionOptionsAddInputBatchDimension(builder, self.inputBatchDimension) + StablehloConvolutionOptionsAddInputFeatureDimension(builder, self.inputFeatureDimension) + if self.inputSpatialDimensions is not None: + StablehloConvolutionOptionsAddInputSpatialDimensions(builder, inputSpatialDimensions) + StablehloConvolutionOptionsAddKernelInputFeatureDimension(builder, self.kernelInputFeatureDimension) + StablehloConvolutionOptionsAddKernelOutputFeatureDimension(builder, self.kernelOutputFeatureDimension) + if self.kernelSpatialDimensions is not None: + StablehloConvolutionOptionsAddKernelSpatialDimensions(builder, kernelSpatialDimensions) + StablehloConvolutionOptionsAddOutputBatchDimension(builder, self.outputBatchDimension) + StablehloConvolutionOptionsAddOutputFeatureDimension(builder, self.outputFeatureDimension) + if self.outputSpatialDimensions is not None: + StablehloConvolutionOptionsAddOutputSpatialDimensions(builder, outputSpatialDimensions) + StablehloConvolutionOptionsAddFeatureGroupCount(builder, self.featureGroupCount) + StablehloConvolutionOptionsAddBatchGroupCount(builder, 
self.batchGroupCount) + if self.precisionConfig is not None: + StablehloConvolutionOptionsAddPrecisionConfig(builder, precisionConfig) + stablehloConvolutionOptions = StablehloConvolutionOptionsEnd(builder) + return stablehloConvolutionOptions + + +class StablehloScatterOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloScatterOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloScatterOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloScatterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloScatterOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloScatterOptions + def IndicesAreSorted(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # StablehloScatterOptions + def UpdateWindowDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloScatterOptions + def UpdateWindowDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def UpdateWindowDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # 
StablehloScatterOptions + def UpdateWindowDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # StablehloScatterOptions + def InsertedWindowDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloScatterOptions + def InsertedWindowDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # StablehloScatterOptions + def ScatterDimsToOperandDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # 
StablehloScatterOptions + def IndexVectorDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # StablehloScatterOptions + def UniqueIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # StablehloScatterOptions + def UpdateComputationSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def StablehloScatterOptionsStart(builder): + builder.StartObject(7) + +def StablehloScatterOptionsAddIndicesAreSorted(builder, indicesAreSorted): + builder.PrependBoolSlot(0, indicesAreSorted, 0) + +def StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(updateWindowDims), 0) + +def StablehloScatterOptionsStartUpdateWindowDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(insertedWindowDims), 0) + +def StablehloScatterOptionsStartInsertedWindowDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloScatterOptionsAddScatterDimsToOperandDims(builder, scatterDimsToOperandDims): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(scatterDimsToOperandDims), 0) + +def StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, numElems): + return builder.StartVector(8, numElems, 8) + +def StablehloScatterOptionsAddIndexVectorDim(builder, indexVectorDim): + 
builder.PrependInt64Slot(4, indexVectorDim, 0) + +def StablehloScatterOptionsAddUniqueIndices(builder, uniqueIndices): + builder.PrependBoolSlot(5, uniqueIndices, 0) + +def StablehloScatterOptionsAddUpdateComputationSubgraphIndex(builder, updateComputationSubgraphIndex): + builder.PrependInt32Slot(6, updateComputationSubgraphIndex, 0) + +def StablehloScatterOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class StablehloScatterOptionsT(object): + + # StablehloScatterOptionsT + def __init__(self): + self.indicesAreSorted = False # type: bool + self.updateWindowDims = None # type: List[int] + self.insertedWindowDims = None # type: List[int] + self.scatterDimsToOperandDims = None # type: List[int] + self.indexVectorDim = 0 # type: int + self.uniqueIndices = False # type: bool + self.updateComputationSubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloScatterOptions = StablehloScatterOptions() + stablehloScatterOptions.Init(buf, pos) + return cls.InitFromObj(stablehloScatterOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloScatterOptions): + x = StablehloScatterOptionsT() + x._UnPack(stablehloScatterOptions) + return x + + # StablehloScatterOptionsT + def _UnPack(self, stablehloScatterOptions): + if stablehloScatterOptions is None: + return + self.indicesAreSorted = stablehloScatterOptions.IndicesAreSorted() + if not stablehloScatterOptions.UpdateWindowDimsIsNone(): + if np is None: + self.updateWindowDims = [] + for i in range(stablehloScatterOptions.UpdateWindowDimsLength()): + self.updateWindowDims.append(stablehloScatterOptions.UpdateWindowDims(i)) + else: + self.updateWindowDims = stablehloScatterOptions.UpdateWindowDimsAsNumpy() + if not stablehloScatterOptions.InsertedWindowDimsIsNone(): + if np is 
None: + self.insertedWindowDims = [] + for i in range(stablehloScatterOptions.InsertedWindowDimsLength()): + self.insertedWindowDims.append(stablehloScatterOptions.InsertedWindowDims(i)) + else: + self.insertedWindowDims = stablehloScatterOptions.InsertedWindowDimsAsNumpy() + if not stablehloScatterOptions.ScatterDimsToOperandDimsIsNone(): + if np is None: + self.scatterDimsToOperandDims = [] + for i in range(stablehloScatterOptions.ScatterDimsToOperandDimsLength()): + self.scatterDimsToOperandDims.append(stablehloScatterOptions.ScatterDimsToOperandDims(i)) + else: + self.scatterDimsToOperandDims = stablehloScatterOptions.ScatterDimsToOperandDimsAsNumpy() + self.indexVectorDim = stablehloScatterOptions.IndexVectorDim() + self.uniqueIndices = stablehloScatterOptions.UniqueIndices() + self.updateComputationSubgraphIndex = stablehloScatterOptions.UpdateComputationSubgraphIndex() + + # StablehloScatterOptionsT + def Pack(self, builder): + if self.updateWindowDims is not None: + if np is not None and type(self.updateWindowDims) is np.ndarray: + updateWindowDims = builder.CreateNumpyVector(self.updateWindowDims) + else: + StablehloScatterOptionsStartUpdateWindowDimsVector(builder, len(self.updateWindowDims)) + for i in reversed(range(len(self.updateWindowDims))): + builder.PrependInt64(self.updateWindowDims[i]) + updateWindowDims = builder.EndVector() + if self.insertedWindowDims is not None: + if np is not None and type(self.insertedWindowDims) is np.ndarray: + insertedWindowDims = builder.CreateNumpyVector(self.insertedWindowDims) + else: + StablehloScatterOptionsStartInsertedWindowDimsVector(builder, len(self.insertedWindowDims)) + for i in reversed(range(len(self.insertedWindowDims))): + builder.PrependInt64(self.insertedWindowDims[i]) + insertedWindowDims = builder.EndVector() + if self.scatterDimsToOperandDims is not None: + if np is not None and type(self.scatterDimsToOperandDims) is np.ndarray: + scatterDimsToOperandDims = 
builder.CreateNumpyVector(self.scatterDimsToOperandDims) + else: + StablehloScatterOptionsStartScatterDimsToOperandDimsVector(builder, len(self.scatterDimsToOperandDims)) + for i in reversed(range(len(self.scatterDimsToOperandDims))): + builder.PrependInt64(self.scatterDimsToOperandDims[i]) + scatterDimsToOperandDims = builder.EndVector() + StablehloScatterOptionsStart(builder) + StablehloScatterOptionsAddIndicesAreSorted(builder, self.indicesAreSorted) + if self.updateWindowDims is not None: + StablehloScatterOptionsAddUpdateWindowDims(builder, updateWindowDims) + if self.insertedWindowDims is not None: + StablehloScatterOptionsAddInsertedWindowDims(builder, insertedWindowDims) + if self.scatterDimsToOperandDims is not None: + StablehloScatterOptionsAddScatterDimsToOperandDims(builder, scatterDimsToOperandDims) + StablehloScatterOptionsAddIndexVectorDim(builder, self.indexVectorDim) + StablehloScatterOptionsAddUniqueIndices(builder, self.uniqueIndices) + StablehloScatterOptionsAddUpdateComputationSubgraphIndex(builder, self.updateComputationSubgraphIndex) + stablehloScatterOptions = StablehloScatterOptionsEnd(builder) + return stablehloScatterOptions + + +class StablehloRngBitGeneratorOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StablehloRngBitGeneratorOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsStablehloRngBitGeneratorOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StablehloRngBitGeneratorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StablehloRngBitGeneratorOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StablehloRngBitGeneratorOptions + def Algorithm(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def StablehloRngBitGeneratorOptionsStart(builder): + builder.StartObject(1) + +def StablehloRngBitGeneratorOptionsAddAlgorithm(builder, algorithm): + builder.PrependInt8Slot(0, algorithm, 0) + +def StablehloRngBitGeneratorOptionsEnd(builder): + return builder.EndObject() + + + +class StablehloRngBitGeneratorOptionsT(object): + + # StablehloRngBitGeneratorOptionsT + def __init__(self): + self.algorithm = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + stablehloRngBitGeneratorOptions = StablehloRngBitGeneratorOptions() + stablehloRngBitGeneratorOptions.Init(buf, pos) + return cls.InitFromObj(stablehloRngBitGeneratorOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stablehloRngBitGeneratorOptions): + x = StablehloRngBitGeneratorOptionsT() + x._UnPack(stablehloRngBitGeneratorOptions) + return x + + # StablehloRngBitGeneratorOptionsT + def _UnPack(self, stablehloRngBitGeneratorOptions): + if stablehloRngBitGeneratorOptions is None: + return + self.algorithm = stablehloRngBitGeneratorOptions.Algorithm() + + # StablehloRngBitGeneratorOptionsT + def Pack(self, builder): + StablehloRngBitGeneratorOptionsStart(builder) + 
StablehloRngBitGeneratorOptionsAddAlgorithm(builder, self.algorithm) + stablehloRngBitGeneratorOptions = StablehloRngBitGeneratorOptionsEnd(builder) + return stablehloRngBitGeneratorOptions + + +class Conv2DOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConv2DOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Conv2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + 
return 1 + + # Conv2DOptions + def DilationHFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv2DOptions + def QuantizedBiasType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def Conv2DOptionsStart(builder): + builder.StartObject(7) + +def Conv2DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + +def Conv2DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + +def Conv2DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + +def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(3, fusedActivationFunction, 0) + +def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): + builder.PrependInt32Slot(4, dilationWFactor, 1) + +def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): + builder.PrependInt32Slot(5, dilationHFactor, 1) + +def Conv2DOptionsAddQuantizedBiasType(builder, quantizedBiasType): + builder.PrependInt8Slot(6, quantizedBiasType, 0) + +def Conv2DOptionsEnd(builder): + return builder.EndObject() + + + +class Conv2DOptionsT(object): + + # Conv2DOptionsT + def __init__(self): + self.padding = 0 # type: int + self.strideW = 0 # type: int + self.strideH = 0 # type: int + self.fusedActivationFunction = 0 # type: int + self.dilationWFactor = 1 # type: int + self.dilationHFactor = 1 # type: int + self.quantizedBiasType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + conv2Doptions = Conv2DOptions() + conv2Doptions.Init(buf, pos) + return cls.InitFromObj(conv2Doptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return 
cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, conv2Doptions): + x = Conv2DOptionsT() + x._UnPack(conv2Doptions) + return x + + # Conv2DOptionsT + def _UnPack(self, conv2Doptions): + if conv2Doptions is None: + return + self.padding = conv2Doptions.Padding() + self.strideW = conv2Doptions.StrideW() + self.strideH = conv2Doptions.StrideH() + self.fusedActivationFunction = conv2Doptions.FusedActivationFunction() + self.dilationWFactor = conv2Doptions.DilationWFactor() + self.dilationHFactor = conv2Doptions.DilationHFactor() + self.quantizedBiasType = conv2Doptions.QuantizedBiasType() + + # Conv2DOptionsT + def Pack(self, builder): + Conv2DOptionsStart(builder) + Conv2DOptionsAddPadding(builder, self.padding) + Conv2DOptionsAddStrideW(builder, self.strideW) + Conv2DOptionsAddStrideH(builder, self.strideH) + Conv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + Conv2DOptionsAddDilationWFactor(builder, self.dilationWFactor) + Conv2DOptionsAddDilationHFactor(builder, self.dilationHFactor) + Conv2DOptionsAddQuantizedBiasType(builder, self.quantizedBiasType) + conv2Doptions = Conv2DOptionsEnd(builder) + return conv2Doptions + + +class Conv3DOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv3DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConv3DOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Conv3DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Conv3DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv3DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideD(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv3DOptions + def DilationDFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv3DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv3DOptions + def DilationHFactor(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + +def Conv3DOptionsStart(builder): + builder.StartObject(8) + +def Conv3DOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + +def Conv3DOptionsAddStrideD(builder, strideD): + builder.PrependInt32Slot(1, strideD, 0) + +def Conv3DOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(2, strideW, 0) + +def Conv3DOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(3, strideH, 0) + +def Conv3DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(4, fusedActivationFunction, 0) + +def Conv3DOptionsAddDilationDFactor(builder, dilationDFactor): + builder.PrependInt32Slot(5, dilationDFactor, 1) + +def Conv3DOptionsAddDilationWFactor(builder, dilationWFactor): + builder.PrependInt32Slot(6, dilationWFactor, 1) + +def Conv3DOptionsAddDilationHFactor(builder, dilationHFactor): + builder.PrependInt32Slot(7, dilationHFactor, 1) + +def Conv3DOptionsEnd(builder): + return builder.EndObject() + + + +class Conv3DOptionsT(object): + + # Conv3DOptionsT + def __init__(self): + self.padding = 0 # type: int + self.strideD = 0 # type: int + self.strideW = 0 # type: int + self.strideH = 0 # type: int + self.fusedActivationFunction = 0 # type: int + self.dilationDFactor = 1 # type: int + self.dilationWFactor = 1 # type: int + self.dilationHFactor = 1 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + conv3Doptions = Conv3DOptions() + conv3Doptions.Init(buf, pos) + return cls.InitFromObj(conv3Doptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, conv3Doptions): + x = Conv3DOptionsT() + x._UnPack(conv3Doptions) + return x + + # Conv3DOptionsT + def _UnPack(self, 
class Pool2DOptions(object):
    """Read-only FlatBuffers view over a Pool2DOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Pool2DOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsPool2DOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"TFL3" is the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Pool2DOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Pool2DOptions
    def Padding(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideW(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def StrideH(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterWidth(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FilterHeight(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # Pool2DOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def Pool2DOptionsStart(builder):
    builder.StartObject(6)

def Pool2DOptionsAddPadding(builder, padding):
    builder.PrependInt8Slot(0, padding, 0)

def Pool2DOptionsAddStrideW(builder, strideW):
    builder.PrependInt32Slot(1, strideW, 0)

def Pool2DOptionsAddStrideH(builder, strideH):
    builder.PrependInt32Slot(2, strideH, 0)

def Pool2DOptionsAddFilterWidth(builder, filterWidth):
    builder.PrependInt32Slot(3, filterWidth, 0)

def Pool2DOptionsAddFilterHeight(builder, filterHeight):
    builder.PrependInt32Slot(4, filterHeight, 0)

def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(5, fusedActivationFunction, 0)

def Pool2DOptionsEnd(builder):
    return builder.EndObject()


class Pool2DOptionsT(object):
    """Mutable object API for Pool2DOptions (unpack from / pack to a buffer)."""

    # Pool2DOptionsT
    def __init__(self):
        self.padding = 0  # type: int
        self.strideW = 0  # type: int
        self.strideH = 0  # type: int
        self.filterWidth = 0  # type: int
        self.filterHeight = 0  # type: int
        self.fusedActivationFunction = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        pool2Doptions = Pool2DOptions()
        pool2Doptions.Init(buf, pos)
        return cls.InitFromObj(pool2Doptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, pool2Doptions):
        x = Pool2DOptionsT()
        x._UnPack(pool2Doptions)
        return x

    # Pool2DOptionsT
    def _UnPack(self, pool2Doptions):
        if pool2Doptions is None:
            return
        self.padding = pool2Doptions.Padding()
        self.strideW = pool2Doptions.StrideW()
        self.strideH = pool2Doptions.StrideH()
        self.filterWidth = pool2Doptions.FilterWidth()
        self.filterHeight = pool2Doptions.FilterHeight()
        self.fusedActivationFunction = pool2Doptions.FusedActivationFunction()

    # Pool2DOptionsT
    def Pack(self, builder):
        Pool2DOptionsStart(builder)
        Pool2DOptionsAddPadding(builder, self.padding)
        Pool2DOptionsAddStrideW(builder, self.strideW)
        Pool2DOptionsAddStrideH(builder, self.strideH)
        Pool2DOptionsAddFilterWidth(builder, self.filterWidth)
        Pool2DOptionsAddFilterHeight(builder, self.filterHeight)
        Pool2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        pool2Doptions = Pool2DOptionsEnd(builder)
        return pool2Doptions


class DepthwiseConv2DOptions(object):
    """Read-only FlatBuffers view over a DepthwiseConv2DOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = DepthwiseConv2DOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsDepthwiseConv2DOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # DepthwiseConv2DOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # DepthwiseConv2DOptions
    def Padding(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # DepthwiseConv2DOptions
    def StrideW(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # DepthwiseConv2DOptions
    def StrideH(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # DepthwiseConv2DOptions
    def DepthMultiplier(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # DepthwiseConv2DOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # DepthwiseConv2DOptions
    def DilationWFactor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 1

    # DepthwiseConv2DOptions
    def DilationHFactor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 1
def DepthwiseConv2DOptionsStart(builder):
    builder.StartObject(7)

def DepthwiseConv2DOptionsAddPadding(builder, padding):
    builder.PrependInt8Slot(0, padding, 0)

def DepthwiseConv2DOptionsAddStrideW(builder, strideW):
    builder.PrependInt32Slot(1, strideW, 0)

def DepthwiseConv2DOptionsAddStrideH(builder, strideH):
    builder.PrependInt32Slot(2, strideH, 0)

def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier):
    builder.PrependInt32Slot(3, depthMultiplier, 0)

def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(4, fusedActivationFunction, 0)

def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor):
    builder.PrependInt32Slot(5, dilationWFactor, 1)

def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor):
    builder.PrependInt32Slot(6, dilationHFactor, 1)

def DepthwiseConv2DOptionsEnd(builder):
    return builder.EndObject()


class DepthwiseConv2DOptionsT(object):
    """Mutable object API for DepthwiseConv2DOptions."""

    # DepthwiseConv2DOptionsT
    def __init__(self):
        self.padding = 0  # type: int
        self.strideW = 0  # type: int
        self.strideH = 0  # type: int
        self.depthMultiplier = 0  # type: int
        self.fusedActivationFunction = 0  # type: int
        self.dilationWFactor = 1  # type: int
        self.dilationHFactor = 1  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        depthwiseConv2Doptions = DepthwiseConv2DOptions()
        depthwiseConv2Doptions.Init(buf, pos)
        return cls.InitFromObj(depthwiseConv2Doptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, depthwiseConv2Doptions):
        x = DepthwiseConv2DOptionsT()
        x._UnPack(depthwiseConv2Doptions)
        return x

    # DepthwiseConv2DOptionsT
    def _UnPack(self, depthwiseConv2Doptions):
        if depthwiseConv2Doptions is None:
            return
        self.padding = depthwiseConv2Doptions.Padding()
        self.strideW = depthwiseConv2Doptions.StrideW()
        self.strideH = depthwiseConv2Doptions.StrideH()
        self.depthMultiplier = depthwiseConv2Doptions.DepthMultiplier()
        self.fusedActivationFunction = depthwiseConv2Doptions.FusedActivationFunction()
        self.dilationWFactor = depthwiseConv2Doptions.DilationWFactor()
        self.dilationHFactor = depthwiseConv2Doptions.DilationHFactor()

    # DepthwiseConv2DOptionsT
    def Pack(self, builder):
        DepthwiseConv2DOptionsStart(builder)
        DepthwiseConv2DOptionsAddPadding(builder, self.padding)
        DepthwiseConv2DOptionsAddStrideW(builder, self.strideW)
        DepthwiseConv2DOptionsAddStrideH(builder, self.strideH)
        DepthwiseConv2DOptionsAddDepthMultiplier(builder, self.depthMultiplier)
        DepthwiseConv2DOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        DepthwiseConv2DOptionsAddDilationWFactor(builder, self.dilationWFactor)
        DepthwiseConv2DOptionsAddDilationHFactor(builder, self.dilationHFactor)
        depthwiseConv2Doptions = DepthwiseConv2DOptionsEnd(builder)
        return depthwiseConv2Doptions


class ConcatEmbeddingsOptions(object):
    """Read-only FlatBuffers view over a ConcatEmbeddingsOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ConcatEmbeddingsOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsConcatEmbeddingsOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ConcatEmbeddingsOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ConcatEmbeddingsOptions
    def NumChannels(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannel(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            # int32 vector: elements are 4 bytes apart.
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannel(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

def ConcatEmbeddingsOptionsStart(builder):
    builder.StartObject(3)

def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels):
    builder.PrependInt32Slot(0, numChannels, 0)

def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0)

def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)

def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def ConcatEmbeddingsOptionsEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class ConcatEmbeddingsOptionsT(object):
    """Mutable object API for ConcatEmbeddingsOptions."""

    # ConcatEmbeddingsOptionsT
    def __init__(self):
        self.numChannels = 0  # type: int
        self.numColumnsPerChannel = None  # type: List[int]
        self.embeddingDimPerChannel = None  # type: List[int]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        concatEmbeddingsOptions = ConcatEmbeddingsOptions()
        concatEmbeddingsOptions.Init(buf, pos)
        return cls.InitFromObj(concatEmbeddingsOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, concatEmbeddingsOptions):
        x = ConcatEmbeddingsOptionsT()
        x._UnPack(concatEmbeddingsOptions)
        return x

    # ConcatEmbeddingsOptionsT
    def _UnPack(self, concatEmbeddingsOptions):
        if concatEmbeddingsOptions is None:
            return
        self.numChannels = concatEmbeddingsOptions.NumChannels()
        if not concatEmbeddingsOptions.NumColumnsPerChannelIsNone():
            if np is None:
                # numpy unavailable: fall back to a plain Python list.
                self.numColumnsPerChannel = []
                for i in range(concatEmbeddingsOptions.NumColumnsPerChannelLength()):
                    self.numColumnsPerChannel.append(concatEmbeddingsOptions.NumColumnsPerChannel(i))
            else:
                self.numColumnsPerChannel = concatEmbeddingsOptions.NumColumnsPerChannelAsNumpy()
        if not concatEmbeddingsOptions.EmbeddingDimPerChannelIsNone():
            if np is None:
                self.embeddingDimPerChannel = []
                for i in range(concatEmbeddingsOptions.EmbeddingDimPerChannelLength()):
                    self.embeddingDimPerChannel.append(concatEmbeddingsOptions.EmbeddingDimPerChannel(i))
            else:
                self.embeddingDimPerChannel = concatEmbeddingsOptions.EmbeddingDimPerChannelAsNumpy()

    # ConcatEmbeddingsOptionsT
    def Pack(self, builder):
        # Vectors must be serialized before the enclosing table is started.
        if self.numColumnsPerChannel is not None:
            if np is not None and type(self.numColumnsPerChannel) is np.ndarray:
                numColumnsPerChannel = builder.CreateNumpyVector(self.numColumnsPerChannel)
            else:
                ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, len(self.numColumnsPerChannel))
                for i in reversed(range(len(self.numColumnsPerChannel))):
                    builder.PrependInt32(self.numColumnsPerChannel[i])
                numColumnsPerChannel = builder.EndVector()
        if self.embeddingDimPerChannel is not None:
            if np is not None and type(self.embeddingDimPerChannel) is np.ndarray:
                embeddingDimPerChannel = builder.CreateNumpyVector(self.embeddingDimPerChannel)
            else:
                ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, len(self.embeddingDimPerChannel))
                for i in reversed(range(len(self.embeddingDimPerChannel))):
                    builder.PrependInt32(self.embeddingDimPerChannel[i])
                embeddingDimPerChannel = builder.EndVector()
        ConcatEmbeddingsOptionsStart(builder)
        ConcatEmbeddingsOptionsAddNumChannels(builder, self.numChannels)
        if self.numColumnsPerChannel is not None:
            ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel)
        if self.embeddingDimPerChannel is not None:
            ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel)
        concatEmbeddingsOptions = ConcatEmbeddingsOptionsEnd(builder)
        return concatEmbeddingsOptions
class LSHProjectionOptions(object):
    """Read-only FlatBuffers view over an LSHProjectionOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = LSHProjectionOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsLSHProjectionOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # LSHProjectionOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # LSHProjectionOptions
    def Type(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def LSHProjectionOptionsStart(builder):
    builder.StartObject(1)

def LSHProjectionOptionsAddType(builder, type):
    builder.PrependInt8Slot(0, type, 0)

def LSHProjectionOptionsEnd(builder):
    return builder.EndObject()


class LSHProjectionOptionsT(object):
    """Mutable object API for LSHProjectionOptions."""

    # LSHProjectionOptionsT
    def __init__(self):
        self.type = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        lshprojectionOptions = LSHProjectionOptions()
        lshprojectionOptions.Init(buf, pos)
        return cls.InitFromObj(lshprojectionOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, lshprojectionOptions):
        x = LSHProjectionOptionsT()
        x._UnPack(lshprojectionOptions)
        return x

    # LSHProjectionOptionsT
    def _UnPack(self, lshprojectionOptions):
        if lshprojectionOptions is None:
            return
        self.type = lshprojectionOptions.Type()

    # LSHProjectionOptionsT
    def Pack(self, builder):
        LSHProjectionOptionsStart(builder)
        LSHProjectionOptionsAddType(builder, self.type)
        lshprojectionOptions = LSHProjectionOptionsEnd(builder)
        return lshprojectionOptions


class SVDFOptions(object):
    """Read-only FlatBuffers view over an SVDFOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SVDFOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSVDFOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SVDFOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SVDFOptions
    def Rank(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # SVDFOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # SVDFOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SVDFOptionsStart(builder):
    builder.StartObject(3)

def SVDFOptionsAddRank(builder, rank):
    builder.PrependInt32Slot(0, rank, 0)

def SVDFOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(1, fusedActivationFunction, 0)

def SVDFOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)

def SVDFOptionsEnd(builder):
    return builder.EndObject()


class SVDFOptionsT(object):
    """Mutable object API for SVDFOptions."""

    # SVDFOptionsT
    def __init__(self):
        self.rank = 0  # type: int
        self.fusedActivationFunction = 0  # type: int
        self.asymmetricQuantizeInputs = False  # type: bool

    @classmethod
    def InitFromBuf(cls, buf, pos):
        svdfoptions = SVDFOptions()
        svdfoptions.Init(buf, pos)
        return cls.InitFromObj(svdfoptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, svdfoptions):
        x = SVDFOptionsT()
        x._UnPack(svdfoptions)
        return x

    # SVDFOptionsT
    def _UnPack(self, svdfoptions):
        if svdfoptions is None:
            return
        self.rank = svdfoptions.Rank()
        self.fusedActivationFunction = svdfoptions.FusedActivationFunction()
        self.asymmetricQuantizeInputs = svdfoptions.AsymmetricQuantizeInputs()

    # SVDFOptionsT
    def Pack(self, builder):
        SVDFOptionsStart(builder)
        SVDFOptionsAddRank(builder, self.rank)
        SVDFOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        SVDFOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
        svdfoptions = SVDFOptionsEnd(builder)
        return svdfoptions
class RNNOptions(object):
    """Read-only FlatBuffers view over an RNNOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = RNNOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsRNNOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # RNNOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # RNNOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # RNNOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def RNNOptionsStart(builder):
    builder.StartObject(2)

def RNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(0, fusedActivationFunction, 0)

def RNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    builder.PrependBoolSlot(1, asymmetricQuantizeInputs, 0)

def RNNOptionsEnd(builder):
    return builder.EndObject()


class RNNOptionsT(object):
    """Mutable object API for RNNOptions."""

    # RNNOptionsT
    def __init__(self):
        self.fusedActivationFunction = 0  # type: int
        self.asymmetricQuantizeInputs = False  # type: bool

    @classmethod
    def InitFromBuf(cls, buf, pos):
        rnnoptions = RNNOptions()
        rnnoptions.Init(buf, pos)
        return cls.InitFromObj(rnnoptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, rnnoptions):
        x = RNNOptionsT()
        x._UnPack(rnnoptions)
        return x

    # RNNOptionsT
    def _UnPack(self, rnnoptions):
        if rnnoptions is None:
            return
        self.fusedActivationFunction = rnnoptions.FusedActivationFunction()
        self.asymmetricQuantizeInputs = rnnoptions.AsymmetricQuantizeInputs()

    # RNNOptionsT
    def Pack(self, builder):
        RNNOptionsStart(builder)
        RNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        RNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
        rnnoptions = RNNOptionsEnd(builder)
        return rnnoptions


class SequenceRNNOptions(object):
    """Read-only FlatBuffers view over a SequenceRNNOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SequenceRNNOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSequenceRNNOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SequenceRNNOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SequenceRNNOptions
    def TimeMajor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SequenceRNNOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # SequenceRNNOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

def SequenceRNNOptionsStart(builder):
    builder.StartObject(3)

def SequenceRNNOptionsAddTimeMajor(builder, timeMajor):
    builder.PrependBoolSlot(0, timeMajor, 0)

def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(1, fusedActivationFunction, 0)

def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0)

def SequenceRNNOptionsEnd(builder):
    return builder.EndObject()


class SequenceRNNOptionsT(object):
    """Mutable object API for SequenceRNNOptions."""

    # SequenceRNNOptionsT
    def __init__(self):
        self.timeMajor = False  # type: bool
        self.fusedActivationFunction = 0  # type: int
        self.asymmetricQuantizeInputs = False  # type: bool

    @classmethod
    def InitFromBuf(cls, buf, pos):
        sequenceRnnoptions = SequenceRNNOptions()
        sequenceRnnoptions.Init(buf, pos)
        return cls.InitFromObj(sequenceRnnoptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, sequenceRnnoptions):
        x = SequenceRNNOptionsT()
        x._UnPack(sequenceRnnoptions)
        return x

    # SequenceRNNOptionsT
    def _UnPack(self, sequenceRnnoptions):
        if sequenceRnnoptions is None:
            return
        self.timeMajor = sequenceRnnoptions.TimeMajor()
        self.fusedActivationFunction = sequenceRnnoptions.FusedActivationFunction()
        self.asymmetricQuantizeInputs = sequenceRnnoptions.AsymmetricQuantizeInputs()

    # SequenceRNNOptionsT
    def Pack(self, builder):
        SequenceRNNOptionsStart(builder)
        SequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
        SequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
        sequenceRnnoptions = SequenceRNNOptionsEnd(builder)
        return sequenceRnnoptions


class BidirectionalSequenceRNNOptions(object):
    """Read-only FlatBuffers view over a BidirectionalSequenceRNNOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = BidirectionalSequenceRNNOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsBidirectionalSequenceRNNOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # BidirectionalSequenceRNNOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # BidirectionalSequenceRNNOptions
    def TimeMajor(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # BidirectionalSequenceRNNOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # BidirectionalSequenceRNNOptions
    def MergeOutputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # BidirectionalSequenceRNNOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
def BidirectionalSequenceRNNOptionsStart(builder):
    builder.StartObject(4)

def BidirectionalSequenceRNNOptionsAddTimeMajor(builder, timeMajor):
    builder.PrependBoolSlot(0, timeMajor, 0)

def BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(1, fusedActivationFunction, 0)

def BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, mergeOutputs):
    builder.PrependBoolSlot(2, mergeOutputs, 0)

def BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)

def BidirectionalSequenceRNNOptionsEnd(builder):
    return builder.EndObject()


class BidirectionalSequenceRNNOptionsT(object):
    """Mutable object API for BidirectionalSequenceRNNOptions."""

    # BidirectionalSequenceRNNOptionsT
    def __init__(self):
        self.timeMajor = False  # type: bool
        self.fusedActivationFunction = 0  # type: int
        self.mergeOutputs = False  # type: bool
        self.asymmetricQuantizeInputs = False  # type: bool

    @classmethod
    def InitFromBuf(cls, buf, pos):
        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptions()
        bidirectionalSequenceRnnoptions.Init(buf, pos)
        return cls.InitFromObj(bidirectionalSequenceRnnoptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, bidirectionalSequenceRnnoptions):
        x = BidirectionalSequenceRNNOptionsT()
        x._UnPack(bidirectionalSequenceRnnoptions)
        return x

    # BidirectionalSequenceRNNOptionsT
    def _UnPack(self, bidirectionalSequenceRnnoptions):
        if bidirectionalSequenceRnnoptions is None:
            return
        self.timeMajor = bidirectionalSequenceRnnoptions.TimeMajor()
        self.fusedActivationFunction = bidirectionalSequenceRnnoptions.FusedActivationFunction()
        self.mergeOutputs = bidirectionalSequenceRnnoptions.MergeOutputs()
        self.asymmetricQuantizeInputs = bidirectionalSequenceRnnoptions.AsymmetricQuantizeInputs()

    # BidirectionalSequenceRNNOptionsT
    def Pack(self, builder):
        BidirectionalSequenceRNNOptionsStart(builder)
        BidirectionalSequenceRNNOptionsAddTimeMajor(builder, self.timeMajor)
        BidirectionalSequenceRNNOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        BidirectionalSequenceRNNOptionsAddMergeOutputs(builder, self.mergeOutputs)
        BidirectionalSequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
        bidirectionalSequenceRnnoptions = BidirectionalSequenceRNNOptionsEnd(builder)
        return bidirectionalSequenceRnnoptions


class FullyConnectedOptions(object):
    """Read-only FlatBuffers view over a FullyConnectedOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = FullyConnectedOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsFullyConnectedOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def FullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # FullyConnectedOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # FullyConnectedOptions
    def FusedActivationFunction(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # FullyConnectedOptions
    def WeightsFormat(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # FullyConnectedOptions
    def KeepNumDims(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # FullyConnectedOptions
    def AsymmetricQuantizeInputs(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # FullyConnectedOptions
    def QuantizedBiasType(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def FullyConnectedOptionsStart(builder):
    builder.StartObject(5)

def FullyConnectedOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    builder.PrependInt8Slot(0, fusedActivationFunction, 0)

def FullyConnectedOptionsAddWeightsFormat(builder, weightsFormat):
    builder.PrependInt8Slot(1, weightsFormat, 0)

def FullyConnectedOptionsAddKeepNumDims(builder, keepNumDims):
    builder.PrependBoolSlot(2, keepNumDims, 0)

def FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs):
    builder.PrependBoolSlot(3, asymmetricQuantizeInputs, 0)

def FullyConnectedOptionsAddQuantizedBiasType(builder, quantizedBiasType):
    builder.PrependInt8Slot(4, quantizedBiasType, 0)

def FullyConnectedOptionsEnd(builder):
    return builder.EndObject()


class FullyConnectedOptionsT(object):
    """Mutable object API for FullyConnectedOptions."""

    # FullyConnectedOptionsT
    def __init__(self):
        self.fusedActivationFunction = 0  # type: int
        self.weightsFormat = 0  # type: int
        self.keepNumDims = False  # type: bool
        self.asymmetricQuantizeInputs = False  # type: bool
        self.quantizedBiasType = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        fullyConnectedOptions = FullyConnectedOptions()
        fullyConnectedOptions.Init(buf, pos)
        return cls.InitFromObj(fullyConnectedOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, fullyConnectedOptions):
        x = FullyConnectedOptionsT()
        x._UnPack(fullyConnectedOptions)
        return x

    # FullyConnectedOptionsT
    def _UnPack(self, fullyConnectedOptions):
        if fullyConnectedOptions is None:
            return
        self.fusedActivationFunction = fullyConnectedOptions.FusedActivationFunction()
        self.weightsFormat = fullyConnectedOptions.WeightsFormat()
        self.keepNumDims = fullyConnectedOptions.KeepNumDims()
        self.asymmetricQuantizeInputs = fullyConnectedOptions.AsymmetricQuantizeInputs()
        self.quantizedBiasType = fullyConnectedOptions.QuantizedBiasType()

    # FullyConnectedOptionsT
    def Pack(self, builder):
        FullyConnectedOptionsStart(builder)
        FullyConnectedOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction)
        FullyConnectedOptionsAddWeightsFormat(builder, self.weightsFormat)
        FullyConnectedOptionsAddKeepNumDims(builder, self.keepNumDims)
        FullyConnectedOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs)
        FullyConnectedOptionsAddQuantizedBiasType(builder, self.quantizedBiasType)
        fullyConnectedOptions = FullyConnectedOptionsEnd(builder)
        return fullyConnectedOptions


class SoftmaxOptions(object):
    """Read-only FlatBuffers view over a SoftmaxOptions table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SoftmaxOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSoftmaxOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SoftmaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SoftmaxOptions + def Beta(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def SoftmaxOptionsStart(builder): + builder.StartObject(1) + +def SoftmaxOptionsAddBeta(builder, beta): + builder.PrependFloat32Slot(0, beta, 0.0) + +def SoftmaxOptionsEnd(builder): + return builder.EndObject() + + + +class SoftmaxOptionsT(object): + + # SoftmaxOptionsT + def __init__(self): + self.beta = 0.0 # type: float + + @classmethod + def InitFromBuf(cls, buf, pos): + softmaxOptions = SoftmaxOptions() + softmaxOptions.Init(buf, pos) + return cls.InitFromObj(softmaxOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, softmaxOptions): + x = SoftmaxOptionsT() + x._UnPack(softmaxOptions) + return x + + # SoftmaxOptionsT + def _UnPack(self, softmaxOptions): + if softmaxOptions is None: + return + self.beta = softmaxOptions.Beta() + + # SoftmaxOptionsT + def Pack(self, builder): + SoftmaxOptionsStart(builder) + SoftmaxOptionsAddBeta(builder, self.beta) + softmaxOptions = SoftmaxOptionsEnd(builder) + return softmaxOptions + + +class ConcatenationOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConcatenationOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConcatenationOptions(cls, 
buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ConcatenationOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConcatenationOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ConcatenationOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def ConcatenationOptionsStart(builder): + builder.StartObject(2) + +def ConcatenationOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + +def ConcatenationOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(1, fusedActivationFunction, 0) + +def ConcatenationOptionsEnd(builder): + return builder.EndObject() + + + +class ConcatenationOptionsT(object): + + # ConcatenationOptionsT + def __init__(self): + self.axis = 0 # type: int + self.fusedActivationFunction = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + concatenationOptions = ConcatenationOptions() + concatenationOptions.Init(buf, pos) + return cls.InitFromObj(concatenationOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, concatenationOptions): + x = ConcatenationOptionsT() + x._UnPack(concatenationOptions) + return x + + # ConcatenationOptionsT + def _UnPack(self, concatenationOptions): + if 
concatenationOptions is None: + return + self.axis = concatenationOptions.Axis() + self.fusedActivationFunction = concatenationOptions.FusedActivationFunction() + + # ConcatenationOptionsT + def Pack(self, builder): + ConcatenationOptionsStart(builder) + ConcatenationOptionsAddAxis(builder, self.axis) + ConcatenationOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + concatenationOptions = ConcatenationOptionsEnd(builder) + return concatenationOptions + + +class AddOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AddOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAddOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # AddOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # AddOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # AddOptions + def PotScaleInt16(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return True + +def AddOptionsStart(builder): + builder.StartObject(2) + +def AddOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def AddOptionsAddPotScaleInt16(builder, potScaleInt16): + builder.PrependBoolSlot(1, potScaleInt16, 1) + +def AddOptionsEnd(builder): + return builder.EndObject() + + + 
+class AddOptionsT(object): + + # AddOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + self.potScaleInt16 = True # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + addOptions = AddOptions() + addOptions.Init(buf, pos) + return cls.InitFromObj(addOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, addOptions): + x = AddOptionsT() + x._UnPack(addOptions) + return x + + # AddOptionsT + def _UnPack(self, addOptions): + if addOptions is None: + return + self.fusedActivationFunction = addOptions.FusedActivationFunction() + self.potScaleInt16 = addOptions.PotScaleInt16() + + # AddOptionsT + def Pack(self, builder): + AddOptionsStart(builder) + AddOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + AddOptionsAddPotScaleInt16(builder, self.potScaleInt16) + addOptions = AddOptionsEnd(builder) + return addOptions + + +class MulOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MulOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMulOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MulOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MulOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def MulOptionsStart(builder): + builder.StartObject(1) + +def MulOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def MulOptionsEnd(builder): + return builder.EndObject() + + + +class MulOptionsT(object): + + # MulOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + mulOptions = MulOptions() + mulOptions.Init(buf, pos) + return cls.InitFromObj(mulOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, mulOptions): + x = MulOptionsT() + x._UnPack(mulOptions) + return x + + # MulOptionsT + def _UnPack(self, mulOptions): + if mulOptions is None: + return + self.fusedActivationFunction = mulOptions.FusedActivationFunction() + + # MulOptionsT + def Pack(self, builder): + MulOptionsStart(builder) + MulOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + mulOptions = MulOptionsEnd(builder) + return mulOptions + + +class L2NormOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = L2NormOptions() + x.Init(buf, n + offset) + return x + + @classmethod 
+ def GetRootAsL2NormOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def L2NormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # L2NormOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # L2NormOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def L2NormOptionsStart(builder): + builder.StartObject(1) + +def L2NormOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def L2NormOptionsEnd(builder): + return builder.EndObject() + + + +class L2NormOptionsT(object): + + # L2NormOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + l2NormOptions = L2NormOptions() + l2NormOptions.Init(buf, pos) + return cls.InitFromObj(l2NormOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, l2NormOptions): + x = L2NormOptionsT() + x._UnPack(l2NormOptions) + return x + + # L2NormOptionsT + def _UnPack(self, l2NormOptions): + if l2NormOptions is None: + return + self.fusedActivationFunction = l2NormOptions.FusedActivationFunction() + + # L2NormOptionsT + def Pack(self, builder): + L2NormOptionsStart(builder) + L2NormOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + l2NormOptions = L2NormOptionsEnd(builder) + return l2NormOptions + + +class LocalResponseNormalizationOptions(object): + __slots__ = ['_tab'] + + @classmethod + def 
GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LocalResponseNormalizationOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLocalResponseNormalizationOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LocalResponseNormalizationOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LocalResponseNormalizationOptions + def Radius(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # LocalResponseNormalizationOptions + def Bias(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LocalResponseNormalizationOptions + def Alpha(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LocalResponseNormalizationOptions + def Beta(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def LocalResponseNormalizationOptionsStart(builder): + builder.StartObject(4) + +def LocalResponseNormalizationOptionsAddRadius(builder, radius): + builder.PrependInt32Slot(0, radius, 0) + +def LocalResponseNormalizationOptionsAddBias(builder, bias): + builder.PrependFloat32Slot(1, bias, 0.0) + +def 
LocalResponseNormalizationOptionsAddAlpha(builder, alpha): + builder.PrependFloat32Slot(2, alpha, 0.0) + +def LocalResponseNormalizationOptionsAddBeta(builder, beta): + builder.PrependFloat32Slot(3, beta, 0.0) + +def LocalResponseNormalizationOptionsEnd(builder): + return builder.EndObject() + + + +class LocalResponseNormalizationOptionsT(object): + + # LocalResponseNormalizationOptionsT + def __init__(self): + self.radius = 0 # type: int + self.bias = 0.0 # type: float + self.alpha = 0.0 # type: float + self.beta = 0.0 # type: float + + @classmethod + def InitFromBuf(cls, buf, pos): + localResponseNormalizationOptions = LocalResponseNormalizationOptions() + localResponseNormalizationOptions.Init(buf, pos) + return cls.InitFromObj(localResponseNormalizationOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, localResponseNormalizationOptions): + x = LocalResponseNormalizationOptionsT() + x._UnPack(localResponseNormalizationOptions) + return x + + # LocalResponseNormalizationOptionsT + def _UnPack(self, localResponseNormalizationOptions): + if localResponseNormalizationOptions is None: + return + self.radius = localResponseNormalizationOptions.Radius() + self.bias = localResponseNormalizationOptions.Bias() + self.alpha = localResponseNormalizationOptions.Alpha() + self.beta = localResponseNormalizationOptions.Beta() + + # LocalResponseNormalizationOptionsT + def Pack(self, builder): + LocalResponseNormalizationOptionsStart(builder) + LocalResponseNormalizationOptionsAddRadius(builder, self.radius) + LocalResponseNormalizationOptionsAddBias(builder, self.bias) + LocalResponseNormalizationOptionsAddAlpha(builder, self.alpha) + LocalResponseNormalizationOptionsAddBeta(builder, self.beta) + localResponseNormalizationOptions = LocalResponseNormalizationOptionsEnd(builder) + return localResponseNormalizationOptions 
+ + +class LSTMOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LSTMOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLSTMOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # LSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # LSTMOptions + def KernelType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # LSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def LSTMOptionsStart(builder): + builder.StartObject(5) + +def LSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, 
fusedActivationFunction, 0) + +def LSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + +def LSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + +def LSTMOptionsAddKernelType(builder, kernelType): + builder.PrependInt8Slot(3, kernelType, 0) + +def LSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) + +def LSTMOptionsEnd(builder): + return builder.EndObject() + + + +class LSTMOptionsT(object): + + # LSTMOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + self.cellClip = 0.0 # type: float + self.projClip = 0.0 # type: float + self.kernelType = 0 # type: int + self.asymmetricQuantizeInputs = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + lstmoptions = LSTMOptions() + lstmoptions.Init(buf, pos) + return cls.InitFromObj(lstmoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, lstmoptions): + x = LSTMOptionsT() + x._UnPack(lstmoptions) + return x + + # LSTMOptionsT + def _UnPack(self, lstmoptions): + if lstmoptions is None: + return + self.fusedActivationFunction = lstmoptions.FusedActivationFunction() + self.cellClip = lstmoptions.CellClip() + self.projClip = lstmoptions.ProjClip() + self.kernelType = lstmoptions.KernelType() + self.asymmetricQuantizeInputs = lstmoptions.AsymmetricQuantizeInputs() + + # LSTMOptionsT + def Pack(self, builder): + LSTMOptionsStart(builder) + LSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + LSTMOptionsAddCellClip(builder, self.cellClip) + LSTMOptionsAddProjClip(builder, self.projClip) + LSTMOptionsAddKernelType(builder, self.kernelType) + LSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs) + lstmoptions = 
LSTMOptionsEnd(builder) + return lstmoptions + + +class UnidirectionalSequenceLSTMOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnidirectionalSequenceLSTMOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnidirectionalSequenceLSTMOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnidirectionalSequenceLSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UnidirectionalSequenceLSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # UnidirectionalSequenceLSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # UnidirectionalSequenceLSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # UnidirectionalSequenceLSTMOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # UnidirectionalSequenceLSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o 
!= 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # UnidirectionalSequenceLSTMOptions + def DiagonalRecurrentTensors(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def UnidirectionalSequenceLSTMOptionsStart(builder): + builder.StartObject(6) + +def UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def UnidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + +def UnidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + +def UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(3, timeMajor, 0) + +def UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(4, asymmetricQuantizeInputs, 0) + +def UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, diagonalRecurrentTensors): + builder.PrependBoolSlot(5, diagonalRecurrentTensors, 0) + +def UnidirectionalSequenceLSTMOptionsEnd(builder): + return builder.EndObject() + + + +class UnidirectionalSequenceLSTMOptionsT(object): + + # UnidirectionalSequenceLSTMOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + self.cellClip = 0.0 # type: float + self.projClip = 0.0 # type: float + self.timeMajor = False # type: bool + self.asymmetricQuantizeInputs = False # type: bool + self.diagonalRecurrentTensors = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptions() + unidirectionalSequenceLstmoptions.Init(buf, pos) + return cls.InitFromObj(unidirectionalSequenceLstmoptions) + + 
@classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unidirectionalSequenceLstmoptions): + x = UnidirectionalSequenceLSTMOptionsT() + x._UnPack(unidirectionalSequenceLstmoptions) + return x + + # UnidirectionalSequenceLSTMOptionsT + def _UnPack(self, unidirectionalSequenceLstmoptions): + if unidirectionalSequenceLstmoptions is None: + return + self.fusedActivationFunction = unidirectionalSequenceLstmoptions.FusedActivationFunction() + self.cellClip = unidirectionalSequenceLstmoptions.CellClip() + self.projClip = unidirectionalSequenceLstmoptions.ProjClip() + self.timeMajor = unidirectionalSequenceLstmoptions.TimeMajor() + self.asymmetricQuantizeInputs = unidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs() + self.diagonalRecurrentTensors = unidirectionalSequenceLstmoptions.DiagonalRecurrentTensors() + + # UnidirectionalSequenceLSTMOptionsT + def Pack(self, builder): + UnidirectionalSequenceLSTMOptionsStart(builder) + UnidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + UnidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip) + UnidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip) + UnidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor) + UnidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs) + UnidirectionalSequenceLSTMOptionsAddDiagonalRecurrentTensors(builder, self.diagonalRecurrentTensors) + unidirectionalSequenceLstmoptions = UnidirectionalSequenceLSTMOptionsEnd(builder) + return unidirectionalSequenceLstmoptions + + +class BidirectionalSequenceLSTMOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BidirectionalSequenceLSTMOptions() + x.Init(buf, n + offset) + 
return x + + @classmethod + def GetRootAsBidirectionalSequenceLSTMOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BidirectionalSequenceLSTMOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BidirectionalSequenceLSTMOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # BidirectionalSequenceLSTMOptions + def CellClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # BidirectionalSequenceLSTMOptions + def ProjClip(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # BidirectionalSequenceLSTMOptions + def MergeOutputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # BidirectionalSequenceLSTMOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return True + + # BidirectionalSequenceLSTMOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + 
self._tab.Pos)) + return False + +def BidirectionalSequenceLSTMOptionsStart(builder): + builder.StartObject(6) + +def BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def BidirectionalSequenceLSTMOptionsAddCellClip(builder, cellClip): + builder.PrependFloat32Slot(1, cellClip, 0.0) + +def BidirectionalSequenceLSTMOptionsAddProjClip(builder, projClip): + builder.PrependFloat32Slot(2, projClip, 0.0) + +def BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, mergeOutputs): + builder.PrependBoolSlot(3, mergeOutputs, 0) + +def BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, timeMajor): + builder.PrependBoolSlot(4, timeMajor, 1) + +def BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(5, asymmetricQuantizeInputs, 0) + +def BidirectionalSequenceLSTMOptionsEnd(builder): + return builder.EndObject() + + + +class BidirectionalSequenceLSTMOptionsT(object): + + # BidirectionalSequenceLSTMOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + self.cellClip = 0.0 # type: float + self.projClip = 0.0 # type: float + self.mergeOutputs = False # type: bool + self.timeMajor = True # type: bool + self.asymmetricQuantizeInputs = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptions() + bidirectionalSequenceLstmoptions.Init(buf, pos) + return cls.InitFromObj(bidirectionalSequenceLstmoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, bidirectionalSequenceLstmoptions): + x = BidirectionalSequenceLSTMOptionsT() + x._UnPack(bidirectionalSequenceLstmoptions) + return x + + # BidirectionalSequenceLSTMOptionsT + def _UnPack(self, 
bidirectionalSequenceLstmoptions): + if bidirectionalSequenceLstmoptions is None: + return + self.fusedActivationFunction = bidirectionalSequenceLstmoptions.FusedActivationFunction() + self.cellClip = bidirectionalSequenceLstmoptions.CellClip() + self.projClip = bidirectionalSequenceLstmoptions.ProjClip() + self.mergeOutputs = bidirectionalSequenceLstmoptions.MergeOutputs() + self.timeMajor = bidirectionalSequenceLstmoptions.TimeMajor() + self.asymmetricQuantizeInputs = bidirectionalSequenceLstmoptions.AsymmetricQuantizeInputs() + + # BidirectionalSequenceLSTMOptionsT + def Pack(self, builder): + BidirectionalSequenceLSTMOptionsStart(builder) + BidirectionalSequenceLSTMOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + BidirectionalSequenceLSTMOptionsAddCellClip(builder, self.cellClip) + BidirectionalSequenceLSTMOptionsAddProjClip(builder, self.projClip) + BidirectionalSequenceLSTMOptionsAddMergeOutputs(builder, self.mergeOutputs) + BidirectionalSequenceLSTMOptionsAddTimeMajor(builder, self.timeMajor) + BidirectionalSequenceLSTMOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs) + bidirectionalSequenceLstmoptions = BidirectionalSequenceLSTMOptionsEnd(builder) + return bidirectionalSequenceLstmoptions + + +class ResizeBilinearOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeBilinearOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsResizeBilinearOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ResizeBilinearOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeBilinearOptions + def AlignCorners(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ResizeBilinearOptions + def HalfPixelCenters(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def ResizeBilinearOptionsStart(builder): + builder.StartObject(4) + +def ResizeBilinearOptionsAddAlignCorners(builder, alignCorners): + builder.PrependBoolSlot(2, alignCorners, 0) + +def ResizeBilinearOptionsAddHalfPixelCenters(builder, halfPixelCenters): + builder.PrependBoolSlot(3, halfPixelCenters, 0) + +def ResizeBilinearOptionsEnd(builder): + return builder.EndObject() + + + +class ResizeBilinearOptionsT(object): + + # ResizeBilinearOptionsT + def __init__(self): + self.alignCorners = False # type: bool + self.halfPixelCenters = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + resizeBilinearOptions = ResizeBilinearOptions() + resizeBilinearOptions.Init(buf, pos) + return cls.InitFromObj(resizeBilinearOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, resizeBilinearOptions): + x = ResizeBilinearOptionsT() + x._UnPack(resizeBilinearOptions) + return x + + # ResizeBilinearOptionsT + def _UnPack(self, resizeBilinearOptions): + if 
resizeBilinearOptions is None: + return + self.alignCorners = resizeBilinearOptions.AlignCorners() + self.halfPixelCenters = resizeBilinearOptions.HalfPixelCenters() + + # ResizeBilinearOptionsT + def Pack(self, builder): + ResizeBilinearOptionsStart(builder) + ResizeBilinearOptionsAddAlignCorners(builder, self.alignCorners) + ResizeBilinearOptionsAddHalfPixelCenters(builder, self.halfPixelCenters) + resizeBilinearOptions = ResizeBilinearOptionsEnd(builder) + return resizeBilinearOptions + + +class ResizeNearestNeighborOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeNearestNeighborOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsResizeNearestNeighborOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ResizeNearestNeighborOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeNearestNeighborOptions + def AlignCorners(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ResizeNearestNeighborOptions + def HalfPixelCenters(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def ResizeNearestNeighborOptionsStart(builder): + builder.StartObject(2) + +def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): + builder.PrependBoolSlot(0, alignCorners, 0) + +def 
ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): + builder.PrependBoolSlot(1, halfPixelCenters, 0) + +def ResizeNearestNeighborOptionsEnd(builder): + return builder.EndObject() + + + +class ResizeNearestNeighborOptionsT(object): + + # ResizeNearestNeighborOptionsT + def __init__(self): + self.alignCorners = False # type: bool + self.halfPixelCenters = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + resizeNearestNeighborOptions = ResizeNearestNeighborOptions() + resizeNearestNeighborOptions.Init(buf, pos) + return cls.InitFromObj(resizeNearestNeighborOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, resizeNearestNeighborOptions): + x = ResizeNearestNeighborOptionsT() + x._UnPack(resizeNearestNeighborOptions) + return x + + # ResizeNearestNeighborOptionsT + def _UnPack(self, resizeNearestNeighborOptions): + if resizeNearestNeighborOptions is None: + return + self.alignCorners = resizeNearestNeighborOptions.AlignCorners() + self.halfPixelCenters = resizeNearestNeighborOptions.HalfPixelCenters() + + # ResizeNearestNeighborOptionsT + def Pack(self, builder): + ResizeNearestNeighborOptionsStart(builder) + ResizeNearestNeighborOptionsAddAlignCorners(builder, self.alignCorners) + ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, self.halfPixelCenters) + resizeNearestNeighborOptions = ResizeNearestNeighborOptionsEnd(builder) + return resizeNearestNeighborOptions + + +class CallOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CallOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCallOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CallOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CallOptions + def Subgraph(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def CallOptionsStart(builder): + builder.StartObject(1) + +def CallOptionsAddSubgraph(builder, subgraph): + builder.PrependUint32Slot(0, subgraph, 0) + +def CallOptionsEnd(builder): + return builder.EndObject() + + + +class CallOptionsT(object): + + # CallOptionsT + def __init__(self): + self.subgraph = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + callOptions = CallOptions() + callOptions.Init(buf, pos) + return cls.InitFromObj(callOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, callOptions): + x = CallOptionsT() + x._UnPack(callOptions) + return x + + # CallOptionsT + def _UnPack(self, callOptions): + if callOptions is None: + return + self.subgraph = callOptions.Subgraph() + + # CallOptionsT + def Pack(self, builder): + CallOptionsStart(builder) + CallOptionsAddSubgraph(builder, self.subgraph) + callOptions = CallOptionsEnd(builder) + return callOptions + + +class PadOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPadOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # PadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def PadOptionsStart(builder): + builder.StartObject(0) + +def PadOptionsEnd(builder): + return builder.EndObject() + + + +class PadOptionsT(object): + + # PadOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + padOptions = PadOptions() + padOptions.Init(buf, pos) + return cls.InitFromObj(padOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, padOptions): + x = PadOptionsT() + x._UnPack(padOptions) + return x + + # PadOptionsT + def _UnPack(self, padOptions): + if padOptions is None: + return + + # PadOptionsT + def Pack(self, builder): + PadOptionsStart(builder) + padOptions = PadOptionsEnd(builder) + return padOptions + + +class PadV2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPadV2Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # PadV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def PadV2OptionsStart(builder): + builder.StartObject(0) + +def PadV2OptionsEnd(builder): + return builder.EndObject() + + + +class PadV2OptionsT(object): + + # PadV2OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + padV2Options = PadV2Options() + padV2Options.Init(buf, pos) + return cls.InitFromObj(padV2Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, padV2Options): + x = PadV2OptionsT() + x._UnPack(padV2Options) + return x + + # PadV2OptionsT + def _UnPack(self, padV2Options): + if padV2Options is None: + return + + # PadV2OptionsT + def Pack(self, builder): + PadV2OptionsStart(builder) + padV2Options = PadV2OptionsEnd(builder) + return padV2Options + + +class ReshapeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReshapeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReshapeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReshapeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReshapeOptions + def NewShape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ReshapeOptions + def NewShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ReshapeOptions + def NewShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ReshapeOptions + def NewShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def ReshapeOptionsStart(builder): + builder.StartObject(1) + +def ReshapeOptionsAddNewShape(builder, newShape): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0) + +def ReshapeOptionsStartNewShapeVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ReshapeOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class ReshapeOptionsT(object): + + # ReshapeOptionsT + def __init__(self): + self.newShape = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + reshapeOptions = ReshapeOptions() + reshapeOptions.Init(buf, pos) + return cls.InitFromObj(reshapeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, reshapeOptions): + x = ReshapeOptionsT() + x._UnPack(reshapeOptions) + return x + + # ReshapeOptionsT + def _UnPack(self, reshapeOptions): + if reshapeOptions is None: + return + if not reshapeOptions.NewShapeIsNone(): + if np is None: + self.newShape = [] + for i in range(reshapeOptions.NewShapeLength()): + self.newShape.append(reshapeOptions.NewShape(i)) + else: + self.newShape = reshapeOptions.NewShapeAsNumpy() + + # ReshapeOptionsT + def Pack(self, builder): + if self.newShape is not None: + if np is not None and type(self.newShape) is np.ndarray: + newShape = builder.CreateNumpyVector(self.newShape) + else: + ReshapeOptionsStartNewShapeVector(builder, len(self.newShape)) + for i in reversed(range(len(self.newShape))): + builder.PrependInt32(self.newShape[i]) + newShape = builder.EndVector() + ReshapeOptionsStart(builder) + if self.newShape is not None: + ReshapeOptionsAddNewShape(builder, newShape) + reshapeOptions = ReshapeOptionsEnd(builder) + return reshapeOptions + + +class SpaceToBatchNDOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SpaceToBatchNDOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSpaceToBatchNDOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SpaceToBatchNDOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SpaceToBatchNDOptionsStart(builder): + builder.StartObject(0) + +def SpaceToBatchNDOptionsEnd(builder): + return builder.EndObject() + + + +class SpaceToBatchNDOptionsT(object): + + # SpaceToBatchNDOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + spaceToBatchNdoptions = SpaceToBatchNDOptions() + spaceToBatchNdoptions.Init(buf, pos) + return cls.InitFromObj(spaceToBatchNdoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, spaceToBatchNdoptions): + x = SpaceToBatchNDOptionsT() + x._UnPack(spaceToBatchNdoptions) + return x + + # SpaceToBatchNDOptionsT + def _UnPack(self, spaceToBatchNdoptions): + if spaceToBatchNdoptions is None: + return + + # SpaceToBatchNDOptionsT + def Pack(self, builder): + SpaceToBatchNDOptionsStart(builder) + spaceToBatchNdoptions = SpaceToBatchNDOptionsEnd(builder) + return spaceToBatchNdoptions + + +class BatchToSpaceNDOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BatchToSpaceNDOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBatchToSpaceNDOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BatchToSpaceNDOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def BatchToSpaceNDOptionsStart(builder): + builder.StartObject(0) + +def BatchToSpaceNDOptionsEnd(builder): + return builder.EndObject() + + + +class BatchToSpaceNDOptionsT(object): + + # BatchToSpaceNDOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + batchToSpaceNdoptions = BatchToSpaceNDOptions() + batchToSpaceNdoptions.Init(buf, pos) + return cls.InitFromObj(batchToSpaceNdoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, batchToSpaceNdoptions): + x = BatchToSpaceNDOptionsT() + x._UnPack(batchToSpaceNdoptions) + return x + + # BatchToSpaceNDOptionsT + def _UnPack(self, batchToSpaceNdoptions): + if batchToSpaceNdoptions is None: + return + + # BatchToSpaceNDOptionsT + def Pack(self, builder): + BatchToSpaceNDOptionsStart(builder) + batchToSpaceNdoptions = BatchToSpaceNDOptionsEnd(builder) + return batchToSpaceNdoptions + + +class SkipGramOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SkipGramOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSkipGramOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SkipGramOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SkipGramOptions + def NgramSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # SkipGramOptions + def MaxSkipSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # SkipGramOptions + def IncludeAllNgrams(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def SkipGramOptionsStart(builder): + builder.StartObject(3) + +def SkipGramOptionsAddNgramSize(builder, ngramSize): + builder.PrependInt32Slot(0, ngramSize, 0) + +def SkipGramOptionsAddMaxSkipSize(builder, maxSkipSize): + builder.PrependInt32Slot(1, maxSkipSize, 0) + +def SkipGramOptionsAddIncludeAllNgrams(builder, includeAllNgrams): + builder.PrependBoolSlot(2, includeAllNgrams, 0) + +def SkipGramOptionsEnd(builder): + return builder.EndObject() + + + +class SkipGramOptionsT(object): + + # SkipGramOptionsT + def __init__(self): + self.ngramSize = 0 # type: int + self.maxSkipSize = 0 # type: int + self.includeAllNgrams = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + skipGramOptions = SkipGramOptions() + skipGramOptions.Init(buf, pos) + return cls.InitFromObj(skipGramOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + 
return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, skipGramOptions): + x = SkipGramOptionsT() + x._UnPack(skipGramOptions) + return x + + # SkipGramOptionsT + def _UnPack(self, skipGramOptions): + if skipGramOptions is None: + return + self.ngramSize = skipGramOptions.NgramSize() + self.maxSkipSize = skipGramOptions.MaxSkipSize() + self.includeAllNgrams = skipGramOptions.IncludeAllNgrams() + + # SkipGramOptionsT + def Pack(self, builder): + SkipGramOptionsStart(builder) + SkipGramOptionsAddNgramSize(builder, self.ngramSize) + SkipGramOptionsAddMaxSkipSize(builder, self.maxSkipSize) + SkipGramOptionsAddIncludeAllNgrams(builder, self.includeAllNgrams) + skipGramOptions = SkipGramOptionsEnd(builder) + return skipGramOptions + + +class SpaceToDepthOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SpaceToDepthOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSpaceToDepthOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SpaceToDepthOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SpaceToDepthOptions + def BlockSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def SpaceToDepthOptionsStart(builder): + builder.StartObject(1) + +def SpaceToDepthOptionsAddBlockSize(builder, blockSize): + builder.PrependInt32Slot(0, blockSize, 0) + +def SpaceToDepthOptionsEnd(builder): + return builder.EndObject() + + + +class SpaceToDepthOptionsT(object): + + # SpaceToDepthOptionsT + def __init__(self): + self.blockSize = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + spaceToDepthOptions = SpaceToDepthOptions() + spaceToDepthOptions.Init(buf, pos) + return cls.InitFromObj(spaceToDepthOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, spaceToDepthOptions): + x = SpaceToDepthOptionsT() + x._UnPack(spaceToDepthOptions) + return x + + # SpaceToDepthOptionsT + def _UnPack(self, spaceToDepthOptions): + if spaceToDepthOptions is None: + return + self.blockSize = spaceToDepthOptions.BlockSize() + + # SpaceToDepthOptionsT + def Pack(self, builder): + SpaceToDepthOptionsStart(builder) + SpaceToDepthOptionsAddBlockSize(builder, self.blockSize) + spaceToDepthOptions = SpaceToDepthOptionsEnd(builder) + return spaceToDepthOptions + + +class DepthToSpaceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DepthToSpaceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDepthToSpaceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DepthToSpaceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DepthToSpaceOptions + def BlockSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def DepthToSpaceOptionsStart(builder): + builder.StartObject(1) + +def DepthToSpaceOptionsAddBlockSize(builder, blockSize): + builder.PrependInt32Slot(0, blockSize, 0) + +def DepthToSpaceOptionsEnd(builder): + return builder.EndObject() + + + +class DepthToSpaceOptionsT(object): + + # DepthToSpaceOptionsT + def __init__(self): + self.blockSize = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + depthToSpaceOptions = DepthToSpaceOptions() + depthToSpaceOptions.Init(buf, pos) + return cls.InitFromObj(depthToSpaceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, depthToSpaceOptions): + x = DepthToSpaceOptionsT() + x._UnPack(depthToSpaceOptions) + return x + + # DepthToSpaceOptionsT + def _UnPack(self, depthToSpaceOptions): + if depthToSpaceOptions is None: + return + self.blockSize = depthToSpaceOptions.BlockSize() + + # DepthToSpaceOptionsT + def Pack(self, builder): + DepthToSpaceOptionsStart(builder) + DepthToSpaceOptionsAddBlockSize(builder, self.blockSize) + 
depthToSpaceOptions = DepthToSpaceOptionsEnd(builder) + return depthToSpaceOptions + + +class SubOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SubOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSubOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SubOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SubOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # SubOptions + def PotScaleInt16(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return True + +def SubOptionsStart(builder): + builder.StartObject(2) + +def SubOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def SubOptionsAddPotScaleInt16(builder, potScaleInt16): + builder.PrependBoolSlot(1, potScaleInt16, 1) + +def SubOptionsEnd(builder): + return builder.EndObject() + + + +class SubOptionsT(object): + + # SubOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + self.potScaleInt16 = True # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + subOptions = SubOptions() + subOptions.Init(buf, pos) + return cls.InitFromObj(subOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, subOptions): + x = SubOptionsT() + x._UnPack(subOptions) + return x + + # SubOptionsT + def _UnPack(self, subOptions): + if subOptions is None: + return + self.fusedActivationFunction = subOptions.FusedActivationFunction() + self.potScaleInt16 = subOptions.PotScaleInt16() + + # SubOptionsT + def Pack(self, builder): + SubOptionsStart(builder) + SubOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + SubOptionsAddPotScaleInt16(builder, self.potScaleInt16) + subOptions = SubOptionsEnd(builder) + return subOptions + + +class DivOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DivOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDivOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DivOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DivOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def DivOptionsStart(builder): + builder.StartObject(1) + +def DivOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + builder.PrependInt8Slot(0, fusedActivationFunction, 0) + +def DivOptionsEnd(builder): + return builder.EndObject() + + + +class DivOptionsT(object): + + # DivOptionsT + def __init__(self): + self.fusedActivationFunction = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + divOptions = DivOptions() + divOptions.Init(buf, pos) + return cls.InitFromObj(divOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, divOptions): + x = DivOptionsT() + x._UnPack(divOptions) + return x + + # DivOptionsT + def _UnPack(self, divOptions): + if divOptions is None: + return + self.fusedActivationFunction = divOptions.FusedActivationFunction() + + # DivOptionsT + def Pack(self, builder): + DivOptionsStart(builder) + DivOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + divOptions = DivOptionsEnd(builder) + return divOptions + + +class TopKV2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TopKV2Options() + x.Init(buf, n + offset) + return x + + @classmethod 
+ def GetRootAsTopKV2Options(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TopKV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def TopKV2OptionsStart(builder): + builder.StartObject(0) + +def TopKV2OptionsEnd(builder): + return builder.EndObject() + + + +class TopKV2OptionsT(object): + + # TopKV2OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + topKv2Options = TopKV2Options() + topKv2Options.Init(buf, pos) + return cls.InitFromObj(topKv2Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, topKv2Options): + x = TopKV2OptionsT() + x._UnPack(topKv2Options) + return x + + # TopKV2OptionsT + def _UnPack(self, topKv2Options): + if topKv2Options is None: + return + + # TopKV2OptionsT + def Pack(self, builder): + TopKV2OptionsStart(builder) + topKv2Options = TopKV2OptionsEnd(builder) + return topKv2Options + + +class EmbeddingLookupSparseOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = EmbeddingLookupSparseOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsEmbeddingLookupSparseOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # EmbeddingLookupSparseOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # EmbeddingLookupSparseOptions + def Combiner(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def EmbeddingLookupSparseOptionsStart(builder): + builder.StartObject(1) + +def EmbeddingLookupSparseOptionsAddCombiner(builder, combiner): + builder.PrependInt8Slot(0, combiner, 0) + +def EmbeddingLookupSparseOptionsEnd(builder): + return builder.EndObject() + + + +class EmbeddingLookupSparseOptionsT(object): + + # EmbeddingLookupSparseOptionsT + def __init__(self): + self.combiner = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + embeddingLookupSparseOptions = EmbeddingLookupSparseOptions() + embeddingLookupSparseOptions.Init(buf, pos) + return cls.InitFromObj(embeddingLookupSparseOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, embeddingLookupSparseOptions): + x = EmbeddingLookupSparseOptionsT() + x._UnPack(embeddingLookupSparseOptions) + return x + + # EmbeddingLookupSparseOptionsT + def _UnPack(self, embeddingLookupSparseOptions): + if embeddingLookupSparseOptions is None: + return + self.combiner = embeddingLookupSparseOptions.Combiner() + + # EmbeddingLookupSparseOptionsT + def Pack(self, builder): + EmbeddingLookupSparseOptionsStart(builder) + EmbeddingLookupSparseOptionsAddCombiner(builder, self.combiner) + embeddingLookupSparseOptions = 
EmbeddingLookupSparseOptionsEnd(builder) + return embeddingLookupSparseOptions + + +class GatherOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GatherOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGatherOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GatherOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # GatherOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # GatherOptions + def BatchDims(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def GatherOptionsStart(builder): + builder.StartObject(2) + +def GatherOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + +def GatherOptionsAddBatchDims(builder, batchDims): + builder.PrependInt32Slot(1, batchDims, 0) + +def GatherOptionsEnd(builder): + return builder.EndObject() + + + +class GatherOptionsT(object): + + # GatherOptionsT + def __init__(self): + self.axis = 0 # type: int + self.batchDims = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + gatherOptions = GatherOptions() + gatherOptions.Init(buf, pos) + return cls.InitFromObj(gatherOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + 
@classmethod + def InitFromObj(cls, gatherOptions): + x = GatherOptionsT() + x._UnPack(gatherOptions) + return x + + # GatherOptionsT + def _UnPack(self, gatherOptions): + if gatherOptions is None: + return + self.axis = gatherOptions.Axis() + self.batchDims = gatherOptions.BatchDims() + + # GatherOptionsT + def Pack(self, builder): + GatherOptionsStart(builder) + GatherOptionsAddAxis(builder, self.axis) + GatherOptionsAddBatchDims(builder, self.batchDims) + gatherOptions = GatherOptionsEnd(builder) + return gatherOptions + + +class TransposeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTransposeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TransposeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def TransposeOptionsStart(builder): + builder.StartObject(0) + +def TransposeOptionsEnd(builder): + return builder.EndObject() + + + +class TransposeOptionsT(object): + + # TransposeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + transposeOptions = TransposeOptions() + transposeOptions.Init(buf, pos) + return cls.InitFromObj(transposeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, transposeOptions): + x = TransposeOptionsT() + x._UnPack(transposeOptions) + return x + + # TransposeOptionsT + def _UnPack(self, transposeOptions): + if 
transposeOptions is None: + return + + # TransposeOptionsT + def Pack(self, builder): + TransposeOptionsStart(builder) + transposeOptions = TransposeOptionsEnd(builder) + return transposeOptions + + +class ExpOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ExpOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsExpOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ExpOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ExpOptionsStart(builder): + builder.StartObject(0) + +def ExpOptionsEnd(builder): + return builder.EndObject() + + + +class ExpOptionsT(object): + + # ExpOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + expOptions = ExpOptions() + expOptions.Init(buf, pos) + return cls.InitFromObj(expOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, expOptions): + x = ExpOptionsT() + x._UnPack(expOptions) + return x + + # ExpOptionsT + def _UnPack(self, expOptions): + if expOptions is None: + return + + # ExpOptionsT + def Pack(self, builder): + ExpOptionsStart(builder) + expOptions = ExpOptionsEnd(builder) + return expOptions + + +class CosOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CosOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCosOptions(cls, 
buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CosOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def CosOptionsStart(builder): + builder.StartObject(0) + +def CosOptionsEnd(builder): + return builder.EndObject() + + + +class CosOptionsT(object): + + # CosOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + cosOptions = CosOptions() + cosOptions.Init(buf, pos) + return cls.InitFromObj(cosOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, cosOptions): + x = CosOptionsT() + x._UnPack(cosOptions) + return x + + # CosOptionsT + def _UnPack(self, cosOptions): + if cosOptions is None: + return + + # CosOptionsT + def Pack(self, builder): + CosOptionsStart(builder) + cosOptions = CosOptionsEnd(builder) + return cosOptions + + +class ReducerOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReducerOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReducerOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReducerOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReducerOptions + def KeepDims(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def ReducerOptionsStart(builder): + builder.StartObject(1) + +def ReducerOptionsAddKeepDims(builder, keepDims): + builder.PrependBoolSlot(0, keepDims, 0) + +def ReducerOptionsEnd(builder): + return builder.EndObject() + + + +class ReducerOptionsT(object): + + # ReducerOptionsT + def __init__(self): + self.keepDims = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + reducerOptions = ReducerOptions() + reducerOptions.Init(buf, pos) + return cls.InitFromObj(reducerOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, reducerOptions): + x = ReducerOptionsT() + x._UnPack(reducerOptions) + return x + + # ReducerOptionsT + def _UnPack(self, reducerOptions): + if reducerOptions is None: + return + self.keepDims = reducerOptions.KeepDims() + + # ReducerOptionsT + def Pack(self, builder): + ReducerOptionsStart(builder) + ReducerOptionsAddKeepDims(builder, self.keepDims) + reducerOptions = ReducerOptionsEnd(builder) + return reducerOptions + + +class SqueezeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SqueezeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsSqueezeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SqueezeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SqueezeOptions + def SqueezeDims(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SqueezeOptions + def SqueezeDimsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SqueezeOptions + def SqueezeDimsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SqueezeOptions + def SqueezeDimsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def SqueezeOptionsStart(builder): + builder.StartObject(1) + +def SqueezeOptionsAddSqueezeDims(builder, squeezeDims): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(squeezeDims), 0) + +def SqueezeOptionsStartSqueezeDimsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SqueezeOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class SqueezeOptionsT(object): + + # SqueezeOptionsT + def __init__(self): + self.squeezeDims = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + squeezeOptions = SqueezeOptions() + squeezeOptions.Init(buf, pos) + return 
cls.InitFromObj(squeezeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, squeezeOptions): + x = SqueezeOptionsT() + x._UnPack(squeezeOptions) + return x + + # SqueezeOptionsT + def _UnPack(self, squeezeOptions): + if squeezeOptions is None: + return + if not squeezeOptions.SqueezeDimsIsNone(): + if np is None: + self.squeezeDims = [] + for i in range(squeezeOptions.SqueezeDimsLength()): + self.squeezeDims.append(squeezeOptions.SqueezeDims(i)) + else: + self.squeezeDims = squeezeOptions.SqueezeDimsAsNumpy() + + # SqueezeOptionsT + def Pack(self, builder): + if self.squeezeDims is not None: + if np is not None and type(self.squeezeDims) is np.ndarray: + squeezeDims = builder.CreateNumpyVector(self.squeezeDims) + else: + SqueezeOptionsStartSqueezeDimsVector(builder, len(self.squeezeDims)) + for i in reversed(range(len(self.squeezeDims))): + builder.PrependInt32(self.squeezeDims[i]) + squeezeDims = builder.EndVector() + SqueezeOptionsStart(builder) + if self.squeezeDims is not None: + SqueezeOptionsAddSqueezeDims(builder, squeezeDims) + squeezeOptions = SqueezeOptionsEnd(builder) + return squeezeOptions + + +class SplitOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SplitOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSplitOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SplitOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SplitOptions + def NumSplits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def SplitOptionsStart(builder): + builder.StartObject(1) + +def SplitOptionsAddNumSplits(builder, numSplits): + builder.PrependInt32Slot(0, numSplits, 0) + +def SplitOptionsEnd(builder): + return builder.EndObject() + + + +class SplitOptionsT(object): + + # SplitOptionsT + def __init__(self): + self.numSplits = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + splitOptions = SplitOptions() + splitOptions.Init(buf, pos) + return cls.InitFromObj(splitOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, splitOptions): + x = SplitOptionsT() + x._UnPack(splitOptions) + return x + + # SplitOptionsT + def _UnPack(self, splitOptions): + if splitOptions is None: + return + self.numSplits = splitOptions.NumSplits() + + # SplitOptionsT + def Pack(self, builder): + SplitOptionsStart(builder) + SplitOptionsAddNumSplits(builder, self.numSplits) + splitOptions = SplitOptionsEnd(builder) + return splitOptions + + +class SplitVOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SplitVOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSplitVOptions(cls, buf, offset=0): + """This method is 
deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SplitVOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SplitVOptions + def NumSplits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def SplitVOptionsStart(builder): + builder.StartObject(1) + +def SplitVOptionsAddNumSplits(builder, numSplits): + builder.PrependInt32Slot(0, numSplits, 0) + +def SplitVOptionsEnd(builder): + return builder.EndObject() + + + +class SplitVOptionsT(object): + + # SplitVOptionsT + def __init__(self): + self.numSplits = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + splitVoptions = SplitVOptions() + splitVoptions.Init(buf, pos) + return cls.InitFromObj(splitVoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, splitVoptions): + x = SplitVOptionsT() + x._UnPack(splitVoptions) + return x + + # SplitVOptionsT + def _UnPack(self, splitVoptions): + if splitVoptions is None: + return + self.numSplits = splitVoptions.NumSplits() + + # SplitVOptionsT + def Pack(self, builder): + SplitVOptionsStart(builder) + SplitVOptionsAddNumSplits(builder, self.numSplits) + splitVoptions = SplitVOptionsEnd(builder) + return splitVoptions + + +class StridedSliceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StridedSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsStridedSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StridedSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StridedSliceOptions + def BeginMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EndMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EllipsisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def NewAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def ShrinkAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def Offset(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def StridedSliceOptionsStart(builder): + builder.StartObject(6) + +def StridedSliceOptionsAddBeginMask(builder, beginMask): + builder.PrependInt32Slot(0, 
beginMask, 0) + +def StridedSliceOptionsAddEndMask(builder, endMask): + builder.PrependInt32Slot(1, endMask, 0) + +def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): + builder.PrependInt32Slot(2, ellipsisMask, 0) + +def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): + builder.PrependInt32Slot(3, newAxisMask, 0) + +def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): + builder.PrependInt32Slot(4, shrinkAxisMask, 0) + +def StridedSliceOptionsAddOffset(builder, offset): + builder.PrependBoolSlot(5, offset, 0) + +def StridedSliceOptionsEnd(builder): + return builder.EndObject() + + + +class StridedSliceOptionsT(object): + + # StridedSliceOptionsT + def __init__(self): + self.beginMask = 0 # type: int + self.endMask = 0 # type: int + self.ellipsisMask = 0 # type: int + self.newAxisMask = 0 # type: int + self.shrinkAxisMask = 0 # type: int + self.offset = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + stridedSliceOptions = StridedSliceOptions() + stridedSliceOptions.Init(buf, pos) + return cls.InitFromObj(stridedSliceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, stridedSliceOptions): + x = StridedSliceOptionsT() + x._UnPack(stridedSliceOptions) + return x + + # StridedSliceOptionsT + def _UnPack(self, stridedSliceOptions): + if stridedSliceOptions is None: + return + self.beginMask = stridedSliceOptions.BeginMask() + self.endMask = stridedSliceOptions.EndMask() + self.ellipsisMask = stridedSliceOptions.EllipsisMask() + self.newAxisMask = stridedSliceOptions.NewAxisMask() + self.shrinkAxisMask = stridedSliceOptions.ShrinkAxisMask() + self.offset = stridedSliceOptions.Offset() + + # StridedSliceOptionsT + def Pack(self, builder): + StridedSliceOptionsStart(builder) + StridedSliceOptionsAddBeginMask(builder, self.beginMask) + 
StridedSliceOptionsAddEndMask(builder, self.endMask) + StridedSliceOptionsAddEllipsisMask(builder, self.ellipsisMask) + StridedSliceOptionsAddNewAxisMask(builder, self.newAxisMask) + StridedSliceOptionsAddShrinkAxisMask(builder, self.shrinkAxisMask) + StridedSliceOptionsAddOffset(builder, self.offset) + stridedSliceOptions = StridedSliceOptionsEnd(builder) + return stridedSliceOptions + + +class LogSoftmaxOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogSoftmaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogSoftmaxOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LogSoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LogSoftmaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LogSoftmaxOptionsStart(builder): + builder.StartObject(0) + +def LogSoftmaxOptionsEnd(builder): + return builder.EndObject() + + + +class LogSoftmaxOptionsT(object): + + # LogSoftmaxOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + logSoftmaxOptions = LogSoftmaxOptions() + logSoftmaxOptions.Init(buf, pos) + return cls.InitFromObj(logSoftmaxOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, logSoftmaxOptions): + x = LogSoftmaxOptionsT() + x._UnPack(logSoftmaxOptions) + return x + + # LogSoftmaxOptionsT + def _UnPack(self, logSoftmaxOptions): + if logSoftmaxOptions is None: + return + + # LogSoftmaxOptionsT + def Pack(self, builder): + LogSoftmaxOptionsStart(builder) + 
logSoftmaxOptions = LogSoftmaxOptionsEnd(builder) + return logSoftmaxOptions + + +class CastOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CastOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCastOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CastOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CastOptions + def InDataType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # CastOptions + def OutDataType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def CastOptionsStart(builder): + builder.StartObject(2) + +def CastOptionsAddInDataType(builder, inDataType): + builder.PrependInt8Slot(0, inDataType, 0) + +def CastOptionsAddOutDataType(builder, outDataType): + builder.PrependInt8Slot(1, outDataType, 0) + +def CastOptionsEnd(builder): + return builder.EndObject() + + + +class CastOptionsT(object): + + # CastOptionsT + def __init__(self): + self.inDataType = 0 # type: int + self.outDataType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + castOptions = CastOptions() + castOptions.Init(buf, pos) + return cls.InitFromObj(castOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + 
@classmethod + def InitFromObj(cls, castOptions): + x = CastOptionsT() + x._UnPack(castOptions) + return x + + # CastOptionsT + def _UnPack(self, castOptions): + if castOptions is None: + return + self.inDataType = castOptions.InDataType() + self.outDataType = castOptions.OutDataType() + + # CastOptionsT + def Pack(self, builder): + CastOptionsStart(builder) + CastOptionsAddInDataType(builder, self.inDataType) + CastOptionsAddOutDataType(builder, self.outDataType) + castOptions = CastOptionsEnd(builder) + return castOptions + + +class DequantizeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DequantizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDequantizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DequantizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def DequantizeOptionsStart(builder): + builder.StartObject(0) + +def DequantizeOptionsEnd(builder): + return builder.EndObject() + + + +class DequantizeOptionsT(object): + + # DequantizeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + dequantizeOptions = DequantizeOptions() + dequantizeOptions.Init(buf, pos) + return cls.InitFromObj(dequantizeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, dequantizeOptions): + x = DequantizeOptionsT() + x._UnPack(dequantizeOptions) + return x + + # DequantizeOptionsT + def _UnPack(self, 
dequantizeOptions): + if dequantizeOptions is None: + return + + # DequantizeOptionsT + def Pack(self, builder): + DequantizeOptionsStart(builder) + dequantizeOptions = DequantizeOptionsEnd(builder) + return dequantizeOptions + + +class MaximumMinimumOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MaximumMinimumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMaximumMinimumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MaximumMinimumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def MaximumMinimumOptionsStart(builder): + builder.StartObject(0) + +def MaximumMinimumOptionsEnd(builder): + return builder.EndObject() + + + +class MaximumMinimumOptionsT(object): + + # MaximumMinimumOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + maximumMinimumOptions = MaximumMinimumOptions() + maximumMinimumOptions.Init(buf, pos) + return cls.InitFromObj(maximumMinimumOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, maximumMinimumOptions): + x = MaximumMinimumOptionsT() + x._UnPack(maximumMinimumOptions) + return x + + # MaximumMinimumOptionsT + def _UnPack(self, maximumMinimumOptions): + if maximumMinimumOptions is None: + return + + # MaximumMinimumOptionsT + def Pack(self, builder): + MaximumMinimumOptionsStart(builder) + maximumMinimumOptions = MaximumMinimumOptionsEnd(builder) + return 
maximumMinimumOptions + + +class TileOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTileOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def TileOptionsStart(builder): + builder.StartObject(0) + +def TileOptionsEnd(builder): + return builder.EndObject() + + + +class TileOptionsT(object): + + # TileOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + tileOptions = TileOptions() + tileOptions.Init(buf, pos) + return cls.InitFromObj(tileOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, tileOptions): + x = TileOptionsT() + x._UnPack(tileOptions) + return x + + # TileOptionsT + def _UnPack(self, tileOptions): + if tileOptions is None: + return + + # TileOptionsT + def Pack(self, builder): + TileOptionsStart(builder) + tileOptions = TileOptionsEnd(builder) + return tileOptions + + +class ArgMaxOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ArgMaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsArgMaxOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ArgMaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ArgMaxOptions + def OutputType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def ArgMaxOptionsStart(builder): + builder.StartObject(1) + +def ArgMaxOptionsAddOutputType(builder, outputType): + builder.PrependInt8Slot(0, outputType, 0) + +def ArgMaxOptionsEnd(builder): + return builder.EndObject() + + + +class ArgMaxOptionsT(object): + + # ArgMaxOptionsT + def __init__(self): + self.outputType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + argMaxOptions = ArgMaxOptions() + argMaxOptions.Init(buf, pos) + return cls.InitFromObj(argMaxOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, argMaxOptions): + x = ArgMaxOptionsT() + x._UnPack(argMaxOptions) + return x + + # ArgMaxOptionsT + def _UnPack(self, argMaxOptions): + if argMaxOptions is None: + return + self.outputType = argMaxOptions.OutputType() + + # ArgMaxOptionsT + def Pack(self, builder): + ArgMaxOptionsStart(builder) + ArgMaxOptionsAddOutputType(builder, self.outputType) + argMaxOptions = ArgMaxOptionsEnd(builder) + return argMaxOptions + + +class ArgMinOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ArgMinOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsArgMinOptions(cls, buf, 
offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ArgMinOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ArgMinOptions + def OutputType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def ArgMinOptionsStart(builder): + builder.StartObject(1) + +def ArgMinOptionsAddOutputType(builder, outputType): + builder.PrependInt8Slot(0, outputType, 0) + +def ArgMinOptionsEnd(builder): + return builder.EndObject() + + + +class ArgMinOptionsT(object): + + # ArgMinOptionsT + def __init__(self): + self.outputType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + argMinOptions = ArgMinOptions() + argMinOptions.Init(buf, pos) + return cls.InitFromObj(argMinOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, argMinOptions): + x = ArgMinOptionsT() + x._UnPack(argMinOptions) + return x + + # ArgMinOptionsT + def _UnPack(self, argMinOptions): + if argMinOptions is None: + return + self.outputType = argMinOptions.OutputType() + + # ArgMinOptionsT + def Pack(self, builder): + ArgMinOptionsStart(builder) + ArgMinOptionsAddOutputType(builder, self.outputType) + argMinOptions = ArgMinOptionsEnd(builder) + return argMinOptions + + +class GreaterOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GreaterOptions() + x.Init(buf, n + offset) + return x + + 
@classmethod + def GetRootAsGreaterOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GreaterOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def GreaterOptionsStart(builder): + builder.StartObject(0) + +def GreaterOptionsEnd(builder): + return builder.EndObject() + + + +class GreaterOptionsT(object): + + # GreaterOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + greaterOptions = GreaterOptions() + greaterOptions.Init(buf, pos) + return cls.InitFromObj(greaterOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, greaterOptions): + x = GreaterOptionsT() + x._UnPack(greaterOptions) + return x + + # GreaterOptionsT + def _UnPack(self, greaterOptions): + if greaterOptions is None: + return + + # GreaterOptionsT + def Pack(self, builder): + GreaterOptionsStart(builder) + greaterOptions = GreaterOptionsEnd(builder) + return greaterOptions + + +class GreaterEqualOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GreaterEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGreaterEqualOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GreaterEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def GreaterEqualOptionsStart(builder): + builder.StartObject(0) + +def GreaterEqualOptionsEnd(builder): + return builder.EndObject() + + + +class GreaterEqualOptionsT(object): + + # GreaterEqualOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + greaterEqualOptions = GreaterEqualOptions() + greaterEqualOptions.Init(buf, pos) + return cls.InitFromObj(greaterEqualOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, greaterEqualOptions): + x = GreaterEqualOptionsT() + x._UnPack(greaterEqualOptions) + return x + + # GreaterEqualOptionsT + def _UnPack(self, greaterEqualOptions): + if greaterEqualOptions is None: + return + + # GreaterEqualOptionsT + def Pack(self, builder): + GreaterEqualOptionsStart(builder) + greaterEqualOptions = GreaterEqualOptionsEnd(builder) + return greaterEqualOptions + + +class LessOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LessOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLessOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LessOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LessOptionsStart(builder): + builder.StartObject(0) + +def LessOptionsEnd(builder): + return builder.EndObject() + + + +class LessOptionsT(object): + + # LessOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + lessOptions = LessOptions() + lessOptions.Init(buf, pos) + return cls.InitFromObj(lessOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, lessOptions): + x = LessOptionsT() + x._UnPack(lessOptions) + return x + + # LessOptionsT + def _UnPack(self, lessOptions): + if lessOptions is None: + return + + # LessOptionsT + def Pack(self, builder): + LessOptionsStart(builder) + lessOptions = LessOptionsEnd(builder) + return lessOptions + + +class LessEqualOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LessEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLessEqualOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LessEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LessEqualOptionsStart(builder): + builder.StartObject(0) + +def LessEqualOptionsEnd(builder): + return builder.EndObject() + + + +class LessEqualOptionsT(object): + + # LessEqualOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + lessEqualOptions = LessEqualOptions() + lessEqualOptions.Init(buf, pos) + return cls.InitFromObj(lessEqualOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, lessEqualOptions): + x = LessEqualOptionsT() + x._UnPack(lessEqualOptions) + return x + + # LessEqualOptionsT + def _UnPack(self, lessEqualOptions): + if lessEqualOptions is None: + return + + # LessEqualOptionsT + def Pack(self, builder): + LessEqualOptionsStart(builder) + lessEqualOptions = LessEqualOptionsEnd(builder) + return lessEqualOptions + + +class NegOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NegOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNegOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def NegOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NegOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NegOptionsStart(builder): + builder.StartObject(0) + +def NegOptionsEnd(builder): + return builder.EndObject() + + + +class NegOptionsT(object): + + # NegOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + negOptions = NegOptions() + negOptions.Init(buf, pos) + return cls.InitFromObj(negOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, negOptions): + x = NegOptionsT() + x._UnPack(negOptions) + return x + + # NegOptionsT + def _UnPack(self, negOptions): + if negOptions is None: + return + + # NegOptionsT + def Pack(self, builder): + NegOptionsStart(builder) + negOptions = NegOptionsEnd(builder) + return negOptions + + +class SelectOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SelectOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSelectOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SelectOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SelectOptionsStart(builder): + builder.StartObject(0) + +def SelectOptionsEnd(builder): + return builder.EndObject() + + + +class SelectOptionsT(object): + + # SelectOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + selectOptions = SelectOptions() + selectOptions.Init(buf, pos) + return cls.InitFromObj(selectOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, selectOptions): + x = SelectOptionsT() + x._UnPack(selectOptions) + return x + + # SelectOptionsT + def _UnPack(self, selectOptions): + if selectOptions is None: + return + + # SelectOptionsT + def Pack(self, builder): + SelectOptionsStart(builder) + selectOptions = SelectOptionsEnd(builder) + return selectOptions + + +class SliceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSliceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SliceOptionsStart(builder): + builder.StartObject(0) + +def SliceOptionsEnd(builder): + return builder.EndObject() + + + +class SliceOptionsT(object): + + # SliceOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + sliceOptions = SliceOptions() + sliceOptions.Init(buf, pos) + return cls.InitFromObj(sliceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, sliceOptions): + x = SliceOptionsT() + x._UnPack(sliceOptions) + return x + + # SliceOptionsT + def _UnPack(self, sliceOptions): + if sliceOptions is None: + return + + # SliceOptionsT + def Pack(self, builder): + SliceOptionsStart(builder) + sliceOptions = SliceOptionsEnd(builder) + return sliceOptions + + +class TransposeConvOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeConvOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTransposeConvOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TransposeConvOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TransposeConvOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def QuantizedBiasType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def TransposeConvOptionsStart(builder): + builder.StartObject(5) + +def TransposeConvOptionsAddPadding(builder, padding): + builder.PrependInt8Slot(0, padding, 0) + +def TransposeConvOptionsAddStrideW(builder, strideW): + builder.PrependInt32Slot(1, strideW, 0) + +def TransposeConvOptionsAddStrideH(builder, strideH): + builder.PrependInt32Slot(2, strideH, 0) + +def TransposeConvOptionsAddFusedActivationFunction(builder, fusedActivationFunction): + 
builder.PrependInt8Slot(3, fusedActivationFunction, 0) + +def TransposeConvOptionsAddQuantizedBiasType(builder, quantizedBiasType): + builder.PrependInt8Slot(4, quantizedBiasType, 0) + +def TransposeConvOptionsEnd(builder): + return builder.EndObject() + + + +class TransposeConvOptionsT(object): + + # TransposeConvOptionsT + def __init__(self): + self.padding = 0 # type: int + self.strideW = 0 # type: int + self.strideH = 0 # type: int + self.fusedActivationFunction = 0 # type: int + self.quantizedBiasType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + transposeConvOptions = TransposeConvOptions() + transposeConvOptions.Init(buf, pos) + return cls.InitFromObj(transposeConvOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, transposeConvOptions): + x = TransposeConvOptionsT() + x._UnPack(transposeConvOptions) + return x + + # TransposeConvOptionsT + def _UnPack(self, transposeConvOptions): + if transposeConvOptions is None: + return + self.padding = transposeConvOptions.Padding() + self.strideW = transposeConvOptions.StrideW() + self.strideH = transposeConvOptions.StrideH() + self.fusedActivationFunction = transposeConvOptions.FusedActivationFunction() + self.quantizedBiasType = transposeConvOptions.QuantizedBiasType() + + # TransposeConvOptionsT + def Pack(self, builder): + TransposeConvOptionsStart(builder) + TransposeConvOptionsAddPadding(builder, self.padding) + TransposeConvOptionsAddStrideW(builder, self.strideW) + TransposeConvOptionsAddStrideH(builder, self.strideH) + TransposeConvOptionsAddFusedActivationFunction(builder, self.fusedActivationFunction) + TransposeConvOptionsAddQuantizedBiasType(builder, self.quantizedBiasType) + transposeConvOptions = TransposeConvOptionsEnd(builder) + return transposeConvOptions + + +class ExpandDimsOptions(object): + __slots__ = ['_tab'] + + 
@classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ExpandDimsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsExpandDimsOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ExpandDimsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ExpandDimsOptionsStart(builder): + builder.StartObject(0) + +def ExpandDimsOptionsEnd(builder): + return builder.EndObject() + + + +class ExpandDimsOptionsT(object): + + # ExpandDimsOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + expandDimsOptions = ExpandDimsOptions() + expandDimsOptions.Init(buf, pos) + return cls.InitFromObj(expandDimsOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, expandDimsOptions): + x = ExpandDimsOptionsT() + x._UnPack(expandDimsOptions) + return x + + # ExpandDimsOptionsT + def _UnPack(self, expandDimsOptions): + if expandDimsOptions is None: + return + + # ExpandDimsOptionsT + def Pack(self, builder): + ExpandDimsOptionsStart(builder) + expandDimsOptions = ExpandDimsOptionsEnd(builder) + return expandDimsOptions + + +class SparseToDenseOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparseToDenseOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSparseToDenseOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SparseToDenseOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparseToDenseOptions + def ValidateIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def SparseToDenseOptionsStart(builder): + builder.StartObject(1) + +def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): + builder.PrependBoolSlot(0, validateIndices, 0) + +def SparseToDenseOptionsEnd(builder): + return builder.EndObject() + + + +class SparseToDenseOptionsT(object): + + # SparseToDenseOptionsT + def __init__(self): + self.validateIndices = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + sparseToDenseOptions = SparseToDenseOptions() + sparseToDenseOptions.Init(buf, pos) + return cls.InitFromObj(sparseToDenseOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, sparseToDenseOptions): + x = SparseToDenseOptionsT() + x._UnPack(sparseToDenseOptions) + return x + + # SparseToDenseOptionsT + def _UnPack(self, sparseToDenseOptions): + if sparseToDenseOptions is None: + return + self.validateIndices = sparseToDenseOptions.ValidateIndices() + + # SparseToDenseOptionsT + def Pack(self, builder): + SparseToDenseOptionsStart(builder) + SparseToDenseOptionsAddValidateIndices(builder, self.validateIndices) + sparseToDenseOptions = SparseToDenseOptionsEnd(builder) + return sparseToDenseOptions + + +class EqualOptions(object): + __slots__ = ['_tab'] + + 
@classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = EqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsEqualOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # EqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def EqualOptionsStart(builder): + builder.StartObject(0) + +def EqualOptionsEnd(builder): + return builder.EndObject() + + + +class EqualOptionsT(object): + + # EqualOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + equalOptions = EqualOptions() + equalOptions.Init(buf, pos) + return cls.InitFromObj(equalOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, equalOptions): + x = EqualOptionsT() + x._UnPack(equalOptions) + return x + + # EqualOptionsT + def _UnPack(self, equalOptions): + if equalOptions is None: + return + + # EqualOptionsT + def Pack(self, builder): + EqualOptionsStart(builder) + equalOptions = EqualOptionsEnd(builder) + return equalOptions + + +class NotEqualOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NotEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNotEqualOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NotEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NotEqualOptionsStart(builder): + builder.StartObject(0) + +def NotEqualOptionsEnd(builder): + return builder.EndObject() + + + +class NotEqualOptionsT(object): + + # NotEqualOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + notEqualOptions = NotEqualOptions() + notEqualOptions.Init(buf, pos) + return cls.InitFromObj(notEqualOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, notEqualOptions): + x = NotEqualOptionsT() + x._UnPack(notEqualOptions) + return x + + # NotEqualOptionsT + def _UnPack(self, notEqualOptions): + if notEqualOptions is None: + return + + # NotEqualOptionsT + def Pack(self, builder): + NotEqualOptionsStart(builder) + notEqualOptions = NotEqualOptionsEnd(builder) + return notEqualOptions + + +class ShapeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ShapeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsShapeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ShapeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ShapeOptions + def OutType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def ShapeOptionsStart(builder): + builder.StartObject(1) + +def ShapeOptionsAddOutType(builder, outType): + builder.PrependInt8Slot(0, outType, 0) + +def ShapeOptionsEnd(builder): + return builder.EndObject() + + + +class ShapeOptionsT(object): + + # ShapeOptionsT + def __init__(self): + self.outType = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + shapeOptions = ShapeOptions() + shapeOptions.Init(buf, pos) + return cls.InitFromObj(shapeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, shapeOptions): + x = ShapeOptionsT() + x._UnPack(shapeOptions) + return x + + # ShapeOptionsT + def _UnPack(self, shapeOptions): + if shapeOptions is None: + return + self.outType = shapeOptions.OutType() + + # ShapeOptionsT + def Pack(self, builder): + ShapeOptionsStart(builder) + ShapeOptionsAddOutType(builder, self.outType) + shapeOptions = ShapeOptionsEnd(builder) + return shapeOptions + + +class RankOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RankOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRankOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # RankOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def RankOptionsStart(builder): + builder.StartObject(0) + +def RankOptionsEnd(builder): + return builder.EndObject() + + + +class RankOptionsT(object): + + # RankOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + rankOptions = RankOptions() + rankOptions.Init(buf, pos) + return cls.InitFromObj(rankOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, rankOptions): + x = RankOptionsT() + x._UnPack(rankOptions) + return x + + # RankOptionsT + def _UnPack(self, rankOptions): + if rankOptions is None: + return + + # RankOptionsT + def Pack(self, builder): + RankOptionsStart(builder) + rankOptions = RankOptionsEnd(builder) + return rankOptions + + +class PowOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # PowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def PowOptionsStart(builder): + builder.StartObject(0) + +def PowOptionsEnd(builder): + return builder.EndObject() + + + +class PowOptionsT(object): + + # PowOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + powOptions = PowOptions() + powOptions.Init(buf, pos) + return cls.InitFromObj(powOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, powOptions): + x = PowOptionsT() + x._UnPack(powOptions) + return x + + # PowOptionsT + def _UnPack(self, powOptions): + if powOptions is None: + return + + # PowOptionsT + def Pack(self, builder): + PowOptionsStart(builder) + powOptions = PowOptionsEnd(builder) + return powOptions + + +class FakeQuantOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FakeQuantOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFakeQuantOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FakeQuantOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # FakeQuantOptions + def Min(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # FakeQuantOptions + def Max(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # FakeQuantOptions + def NumBits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # FakeQuantOptions + def NarrowRange(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def FakeQuantOptionsStart(builder): + builder.StartObject(4) + +def FakeQuantOptionsAddMin(builder, min): + builder.PrependFloat32Slot(0, min, 0.0) + +def FakeQuantOptionsAddMax(builder, max): + builder.PrependFloat32Slot(1, max, 0.0) + +def FakeQuantOptionsAddNumBits(builder, numBits): + builder.PrependInt32Slot(2, numBits, 0) + +def FakeQuantOptionsAddNarrowRange(builder, narrowRange): + builder.PrependBoolSlot(3, narrowRange, 0) + +def FakeQuantOptionsEnd(builder): + return builder.EndObject() + + + +class FakeQuantOptionsT(object): + + # FakeQuantOptionsT + def __init__(self): + self.min = 0.0 # type: float + self.max = 0.0 # type: float + self.numBits = 0 # type: int + self.narrowRange = False # type: bool + + 
@classmethod + def InitFromBuf(cls, buf, pos): + fakeQuantOptions = FakeQuantOptions() + fakeQuantOptions.Init(buf, pos) + return cls.InitFromObj(fakeQuantOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, fakeQuantOptions): + x = FakeQuantOptionsT() + x._UnPack(fakeQuantOptions) + return x + + # FakeQuantOptionsT + def _UnPack(self, fakeQuantOptions): + if fakeQuantOptions is None: + return + self.min = fakeQuantOptions.Min() + self.max = fakeQuantOptions.Max() + self.numBits = fakeQuantOptions.NumBits() + self.narrowRange = fakeQuantOptions.NarrowRange() + + # FakeQuantOptionsT + def Pack(self, builder): + FakeQuantOptionsStart(builder) + FakeQuantOptionsAddMin(builder, self.min) + FakeQuantOptionsAddMax(builder, self.max) + FakeQuantOptionsAddNumBits(builder, self.numBits) + FakeQuantOptionsAddNarrowRange(builder, self.narrowRange) + fakeQuantOptions = FakeQuantOptionsEnd(builder) + return fakeQuantOptions + + +class PackOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PackOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPackOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # PackOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PackOptions + def ValuesCount(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # PackOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def PackOptionsStart(builder): + builder.StartObject(2) + +def PackOptionsAddValuesCount(builder, valuesCount): + builder.PrependInt32Slot(0, valuesCount, 0) + +def PackOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(1, axis, 0) + +def PackOptionsEnd(builder): + return builder.EndObject() + + + +class PackOptionsT(object): + + # PackOptionsT + def __init__(self): + self.valuesCount = 0 # type: int + self.axis = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + packOptions = PackOptions() + packOptions.Init(buf, pos) + return cls.InitFromObj(packOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, packOptions): + x = PackOptionsT() + x._UnPack(packOptions) + return x + + # PackOptionsT + def _UnPack(self, packOptions): + if packOptions is None: + return + self.valuesCount = packOptions.ValuesCount() + self.axis = packOptions.Axis() + + # PackOptionsT + def Pack(self, builder): + PackOptionsStart(builder) + PackOptionsAddValuesCount(builder, self.valuesCount) + PackOptionsAddAxis(builder, self.axis) + 
packOptions = PackOptionsEnd(builder) + return packOptions + + +class LogicalOrOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalOrOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalOrOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LogicalOrOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LogicalOrOptionsStart(builder): + builder.StartObject(0) + +def LogicalOrOptionsEnd(builder): + return builder.EndObject() + + + +class LogicalOrOptionsT(object): + + # LogicalOrOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + logicalOrOptions = LogicalOrOptions() + logicalOrOptions.Init(buf, pos) + return cls.InitFromObj(logicalOrOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, logicalOrOptions): + x = LogicalOrOptionsT() + x._UnPack(logicalOrOptions) + return x + + # LogicalOrOptionsT + def _UnPack(self, logicalOrOptions): + if logicalOrOptions is None: + return + + # LogicalOrOptionsT + def Pack(self, builder): + LogicalOrOptionsStart(builder) + logicalOrOptions = LogicalOrOptionsEnd(builder) + return logicalOrOptions + + +class OneHotOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = OneHotOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsOneHotOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def OneHotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # OneHotOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # OneHotOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def OneHotOptionsStart(builder): + builder.StartObject(1) + +def OneHotOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(0, axis, 0) + +def OneHotOptionsEnd(builder): + return builder.EndObject() + + + +class OneHotOptionsT(object): + + # OneHotOptionsT + def __init__(self): + self.axis = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + oneHotOptions = OneHotOptions() + oneHotOptions.Init(buf, pos) + return cls.InitFromObj(oneHotOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, oneHotOptions): + x = OneHotOptionsT() + x._UnPack(oneHotOptions) + return x + + # OneHotOptionsT + def _UnPack(self, oneHotOptions): + if oneHotOptions is None: + return + self.axis = oneHotOptions.Axis() + + # OneHotOptionsT + def Pack(self, builder): + OneHotOptionsStart(builder) + OneHotOptionsAddAxis(builder, self.axis) + oneHotOptions = OneHotOptionsEnd(builder) + return oneHotOptions + + +class AbsOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AbsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsAbsOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # AbsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def AbsOptionsStart(builder): + builder.StartObject(0) + +def AbsOptionsEnd(builder): + return builder.EndObject() + + + +class AbsOptionsT(object): + + # AbsOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + absOptions = AbsOptions() + absOptions.Init(buf, pos) + return cls.InitFromObj(absOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, absOptions): + x = AbsOptionsT() + x._UnPack(absOptions) + return x + + # AbsOptionsT + def _UnPack(self, absOptions): + if absOptions is None: + return + + # AbsOptionsT + def Pack(self, builder): + AbsOptionsStart(builder) + absOptions = AbsOptionsEnd(builder) + return absOptions + + +class HardSwishOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HardSwishOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHardSwishOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HardSwishOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def HardSwishOptionsStart(builder): + builder.StartObject(0) + +def HardSwishOptionsEnd(builder): + return builder.EndObject() + + + +class HardSwishOptionsT(object): + + # HardSwishOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + hardSwishOptions = HardSwishOptions() + hardSwishOptions.Init(buf, pos) + return cls.InitFromObj(hardSwishOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, hardSwishOptions): + x = HardSwishOptionsT() + x._UnPack(hardSwishOptions) + return x + + # HardSwishOptionsT + def _UnPack(self, hardSwishOptions): + if hardSwishOptions is None: + return + + # HardSwishOptionsT + def Pack(self, builder): + HardSwishOptionsStart(builder) + hardSwishOptions = HardSwishOptionsEnd(builder) + return hardSwishOptions + + +class LogicalAndOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalAndOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalAndOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LogicalAndOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LogicalAndOptionsStart(builder): + builder.StartObject(0) + +def LogicalAndOptionsEnd(builder): + return builder.EndObject() + + + +class LogicalAndOptionsT(object): + + # LogicalAndOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + logicalAndOptions = LogicalAndOptions() + logicalAndOptions.Init(buf, pos) + return cls.InitFromObj(logicalAndOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, logicalAndOptions): + x = LogicalAndOptionsT() + x._UnPack(logicalAndOptions) + return x + + # LogicalAndOptionsT + def _UnPack(self, logicalAndOptions): + if logicalAndOptions is None: + return + + # LogicalAndOptionsT + def Pack(self, builder): + LogicalAndOptionsStart(builder) + logicalAndOptions = LogicalAndOptionsEnd(builder) + return logicalAndOptions + + +class LogicalNotOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LogicalNotOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLogicalNotOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LogicalNotOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def LogicalNotOptionsStart(builder): + builder.StartObject(0) + +def LogicalNotOptionsEnd(builder): + return builder.EndObject() + + + +class LogicalNotOptionsT(object): + + # LogicalNotOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + logicalNotOptions = LogicalNotOptions() + logicalNotOptions.Init(buf, pos) + return cls.InitFromObj(logicalNotOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, logicalNotOptions): + x = LogicalNotOptionsT() + x._UnPack(logicalNotOptions) + return x + + # LogicalNotOptionsT + def _UnPack(self, logicalNotOptions): + if logicalNotOptions is None: + return + + # LogicalNotOptionsT + def Pack(self, builder): + LogicalNotOptionsStart(builder) + logicalNotOptions = LogicalNotOptionsEnd(builder) + return logicalNotOptions + + +class UnpackOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnpackOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnpackOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnpackOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UnpackOptions + def Num(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # UnpackOptions + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def UnpackOptionsStart(builder): + builder.StartObject(2) + +def UnpackOptionsAddNum(builder, num): + builder.PrependInt32Slot(0, num, 0) + +def UnpackOptionsAddAxis(builder, axis): + builder.PrependInt32Slot(1, axis, 0) + +def UnpackOptionsEnd(builder): + return builder.EndObject() + + + +class UnpackOptionsT(object): + + # UnpackOptionsT + def __init__(self): + self.num = 0 # type: int + self.axis = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + unpackOptions = UnpackOptions() + unpackOptions.Init(buf, pos) + return cls.InitFromObj(unpackOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unpackOptions): + x = UnpackOptionsT() + x._UnPack(unpackOptions) + return x + + # UnpackOptionsT + def _UnPack(self, unpackOptions): + if unpackOptions is None: + return + self.num = unpackOptions.Num() + self.axis = unpackOptions.Axis() + + # UnpackOptionsT + def Pack(self, builder): + UnpackOptionsStart(builder) + UnpackOptionsAddNum(builder, self.num) + UnpackOptionsAddAxis(builder, self.axis) + unpackOptions = 
UnpackOptionsEnd(builder) + return unpackOptions + + +class FloorDivOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FloorDivOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFloorDivOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FloorDivOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def FloorDivOptionsStart(builder): + builder.StartObject(0) + +def FloorDivOptionsEnd(builder): + return builder.EndObject() + + + +class FloorDivOptionsT(object): + + # FloorDivOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + floorDivOptions = FloorDivOptions() + floorDivOptions.Init(buf, pos) + return cls.InitFromObj(floorDivOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, floorDivOptions): + x = FloorDivOptionsT() + x._UnPack(floorDivOptions) + return x + + # FloorDivOptionsT + def _UnPack(self, floorDivOptions): + if floorDivOptions is None: + return + + # FloorDivOptionsT + def Pack(self, builder): + FloorDivOptionsStart(builder) + floorDivOptions = FloorDivOptionsEnd(builder) + return floorDivOptions + + +class SquareOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SquareOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSquareOptions(cls, buf, offset=0): + 
"""This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SquareOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SquareOptionsStart(builder): + builder.StartObject(0) + +def SquareOptionsEnd(builder): + return builder.EndObject() + + + +class SquareOptionsT(object): + + # SquareOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + squareOptions = SquareOptions() + squareOptions.Init(buf, pos) + return cls.InitFromObj(squareOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, squareOptions): + x = SquareOptionsT() + x._UnPack(squareOptions) + return x + + # SquareOptionsT + def _UnPack(self, squareOptions): + if squareOptions is None: + return + + # SquareOptionsT + def Pack(self, builder): + SquareOptionsStart(builder) + squareOptions = SquareOptionsEnd(builder) + return squareOptions + + +class ZerosLikeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ZerosLikeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsZerosLikeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ZerosLikeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ZerosLikeOptionsStart(builder): + builder.StartObject(0) + +def ZerosLikeOptionsEnd(builder): + return builder.EndObject() + + + +class ZerosLikeOptionsT(object): + + # ZerosLikeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + zerosLikeOptions = ZerosLikeOptions() + zerosLikeOptions.Init(buf, pos) + return cls.InitFromObj(zerosLikeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, zerosLikeOptions): + x = ZerosLikeOptionsT() + x._UnPack(zerosLikeOptions) + return x + + # ZerosLikeOptionsT + def _UnPack(self, zerosLikeOptions): + if zerosLikeOptions is None: + return + + # ZerosLikeOptionsT + def Pack(self, builder): + ZerosLikeOptionsStart(builder) + zerosLikeOptions = ZerosLikeOptionsEnd(builder) + return zerosLikeOptions + + +class FillOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FillOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFillOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FillOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def FillOptionsStart(builder): + builder.StartObject(0) + +def FillOptionsEnd(builder): + return builder.EndObject() + + + +class FillOptionsT(object): + + # FillOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + fillOptions = FillOptions() + fillOptions.Init(buf, pos) + return cls.InitFromObj(fillOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, fillOptions): + x = FillOptionsT() + x._UnPack(fillOptions) + return x + + # FillOptionsT + def _UnPack(self, fillOptions): + if fillOptions is None: + return + + # FillOptionsT + def Pack(self, builder): + FillOptionsStart(builder) + fillOptions = FillOptionsEnd(builder) + return fillOptions + + +class FloorModOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FloorModOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsFloorModOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FloorModOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def FloorModOptionsStart(builder): + builder.StartObject(0) + +def FloorModOptionsEnd(builder): + return builder.EndObject() + + + +class FloorModOptionsT(object): + + # FloorModOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + floorModOptions = FloorModOptions() + floorModOptions.Init(buf, pos) + return cls.InitFromObj(floorModOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, floorModOptions): + x = FloorModOptionsT() + x._UnPack(floorModOptions) + return x + + # FloorModOptionsT + def _UnPack(self, floorModOptions): + if floorModOptions is None: + return + + # FloorModOptionsT + def Pack(self, builder): + FloorModOptionsStart(builder) + floorModOptions = FloorModOptionsEnd(builder) + return floorModOptions + + +class RangeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RangeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRangeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # RangeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def RangeOptionsStart(builder): + builder.StartObject(0) + +def RangeOptionsEnd(builder): + return builder.EndObject() + + + +class RangeOptionsT(object): + + # RangeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + rangeOptions = RangeOptions() + rangeOptions.Init(buf, pos) + return cls.InitFromObj(rangeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, rangeOptions): + x = RangeOptionsT() + x._UnPack(rangeOptions) + return x + + # RangeOptionsT + def _UnPack(self, rangeOptions): + if rangeOptions is None: + return + + # RangeOptionsT + def Pack(self, builder): + RangeOptionsStart(builder) + rangeOptions = RangeOptionsEnd(builder) + return rangeOptions + + +class LeakyReluOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = LeakyReluOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsLeakyReluOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # LeakyReluOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # LeakyReluOptions + def Alpha(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + +def LeakyReluOptionsStart(builder): + builder.StartObject(1) + +def LeakyReluOptionsAddAlpha(builder, alpha): + builder.PrependFloat32Slot(0, alpha, 0.0) + +def LeakyReluOptionsEnd(builder): + return builder.EndObject() + + + +class LeakyReluOptionsT(object): + + # LeakyReluOptionsT + def __init__(self): + self.alpha = 0.0 # type: float + + @classmethod + def InitFromBuf(cls, buf, pos): + leakyReluOptions = LeakyReluOptions() + leakyReluOptions.Init(buf, pos) + return cls.InitFromObj(leakyReluOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, leakyReluOptions): + x = LeakyReluOptionsT() + x._UnPack(leakyReluOptions) + return x + + # LeakyReluOptionsT + def _UnPack(self, leakyReluOptions): + if leakyReluOptions is None: + return + self.alpha = leakyReluOptions.Alpha() + + # LeakyReluOptionsT + def Pack(self, builder): + LeakyReluOptionsStart(builder) + LeakyReluOptionsAddAlpha(builder, self.alpha) + leakyReluOptions = LeakyReluOptionsEnd(builder) + return leakyReluOptions + + +class SquaredDifferenceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SquaredDifferenceOptions() + x.Init(buf, n + offset) + 
return x + + @classmethod + def GetRootAsSquaredDifferenceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SquaredDifferenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SquaredDifferenceOptionsStart(builder): + builder.StartObject(0) + +def SquaredDifferenceOptionsEnd(builder): + return builder.EndObject() + + + +class SquaredDifferenceOptionsT(object): + + # SquaredDifferenceOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + squaredDifferenceOptions = SquaredDifferenceOptions() + squaredDifferenceOptions.Init(buf, pos) + return cls.InitFromObj(squaredDifferenceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, squaredDifferenceOptions): + x = SquaredDifferenceOptionsT() + x._UnPack(squaredDifferenceOptions) + return x + + # SquaredDifferenceOptionsT + def _UnPack(self, squaredDifferenceOptions): + if squaredDifferenceOptions is None: + return + + # SquaredDifferenceOptionsT + def Pack(self, builder): + SquaredDifferenceOptionsStart(builder) + squaredDifferenceOptions = SquaredDifferenceOptionsEnd(builder) + return squaredDifferenceOptions + + +class MirrorPadOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MirrorPadOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMirrorPadOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MirrorPadOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # MirrorPadOptions + def Mode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def MirrorPadOptionsStart(builder): + builder.StartObject(1) + +def MirrorPadOptionsAddMode(builder, mode): + builder.PrependInt8Slot(0, mode, 0) + +def MirrorPadOptionsEnd(builder): + return builder.EndObject() + + + +class MirrorPadOptionsT(object): + + # MirrorPadOptionsT + def __init__(self): + self.mode = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + mirrorPadOptions = MirrorPadOptions() + mirrorPadOptions.Init(buf, pos) + return cls.InitFromObj(mirrorPadOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, mirrorPadOptions): + x = MirrorPadOptionsT() + x._UnPack(mirrorPadOptions) + return x + + # MirrorPadOptionsT + def _UnPack(self, mirrorPadOptions): + if mirrorPadOptions is None: + return + self.mode = mirrorPadOptions.Mode() + + # MirrorPadOptionsT + def Pack(self, builder): + MirrorPadOptionsStart(builder) + MirrorPadOptionsAddMode(builder, self.mode) + mirrorPadOptions = MirrorPadOptionsEnd(builder) + return mirrorPadOptions + + +class UniqueOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UniqueOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsUniqueOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UniqueOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UniqueOptions + def IdxOutType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 2 + +def UniqueOptionsStart(builder): + builder.StartObject(1) + +def UniqueOptionsAddIdxOutType(builder, idxOutType): + builder.PrependInt8Slot(0, idxOutType, 2) + +def UniqueOptionsEnd(builder): + return builder.EndObject() + + + +class UniqueOptionsT(object): + + # UniqueOptionsT + def __init__(self): + self.idxOutType = 2 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + uniqueOptions = UniqueOptions() + uniqueOptions.Init(buf, pos) + return cls.InitFromObj(uniqueOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, uniqueOptions): + x = UniqueOptionsT() + x._UnPack(uniqueOptions) + return x + + # UniqueOptionsT + def _UnPack(self, uniqueOptions): + if uniqueOptions is None: + return + self.idxOutType = uniqueOptions.IdxOutType() + + # UniqueOptionsT + def Pack(self, builder): + UniqueOptionsStart(builder) + UniqueOptionsAddIdxOutType(builder, self.idxOutType) + uniqueOptions = UniqueOptionsEnd(builder) + return uniqueOptions + + +class ReverseV2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReverseV2Options() + x.Init(buf, 
n + offset) + return x + + @classmethod + def GetRootAsReverseV2Options(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReverseV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ReverseV2OptionsStart(builder): + builder.StartObject(0) + +def ReverseV2OptionsEnd(builder): + return builder.EndObject() + + + +class ReverseV2OptionsT(object): + + # ReverseV2OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + reverseV2Options = ReverseV2Options() + reverseV2Options.Init(buf, pos) + return cls.InitFromObj(reverseV2Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, reverseV2Options): + x = ReverseV2OptionsT() + x._UnPack(reverseV2Options) + return x + + # ReverseV2OptionsT + def _UnPack(self, reverseV2Options): + if reverseV2Options is None: + return + + # ReverseV2OptionsT + def Pack(self, builder): + ReverseV2OptionsStart(builder) + reverseV2Options = ReverseV2OptionsEnd(builder) + return reverseV2Options + + +class AddNOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AddNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAddNOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # AddNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def AddNOptionsStart(builder): + builder.StartObject(0) + +def AddNOptionsEnd(builder): + return builder.EndObject() + + + +class AddNOptionsT(object): + + # AddNOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + addNoptions = AddNOptions() + addNoptions.Init(buf, pos) + return cls.InitFromObj(addNoptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, addNoptions): + x = AddNOptionsT() + x._UnPack(addNoptions) + return x + + # AddNOptionsT + def _UnPack(self, addNoptions): + if addNoptions is None: + return + + # AddNOptionsT + def Pack(self, builder): + AddNOptionsStart(builder) + addNoptions = AddNOptionsEnd(builder) + return addNoptions + + +class GatherNdOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GatherNdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGatherNdOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GatherNdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def GatherNdOptionsStart(builder): + builder.StartObject(0) + +def GatherNdOptionsEnd(builder): + return builder.EndObject() + + + +class GatherNdOptionsT(object): + + # GatherNdOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + gatherNdOptions = GatherNdOptions() + gatherNdOptions.Init(buf, pos) + return cls.InitFromObj(gatherNdOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, gatherNdOptions): + x = GatherNdOptionsT() + x._UnPack(gatherNdOptions) + return x + + # GatherNdOptionsT + def _UnPack(self, gatherNdOptions): + if gatherNdOptions is None: + return + + # GatherNdOptionsT + def Pack(self, builder): + GatherNdOptionsStart(builder) + gatherNdOptions = GatherNdOptionsEnd(builder) + return gatherNdOptions + + +class WhereOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = WhereOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsWhereOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # WhereOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def WhereOptionsStart(builder): + builder.StartObject(0) + +def WhereOptionsEnd(builder): + return builder.EndObject() + + + +class WhereOptionsT(object): + + # WhereOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + whereOptions = WhereOptions() + whereOptions.Init(buf, pos) + return cls.InitFromObj(whereOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, whereOptions): + x = WhereOptionsT() + x._UnPack(whereOptions) + return x + + # WhereOptionsT + def _UnPack(self, whereOptions): + if whereOptions is None: + return + + # WhereOptionsT + def Pack(self, builder): + WhereOptionsStart(builder) + whereOptions = WhereOptionsEnd(builder) + return whereOptions + + +class ReverseSequenceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReverseSequenceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReverseSequenceOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReverseSequenceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReverseSequenceOptions + def SeqDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ReverseSequenceOptions + def BatchDim(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def ReverseSequenceOptionsStart(builder): + builder.StartObject(2) + +def ReverseSequenceOptionsAddSeqDim(builder, seqDim): + builder.PrependInt32Slot(0, seqDim, 0) + +def ReverseSequenceOptionsAddBatchDim(builder, batchDim): + builder.PrependInt32Slot(1, batchDim, 0) + +def ReverseSequenceOptionsEnd(builder): + return builder.EndObject() + + + +class ReverseSequenceOptionsT(object): + + # ReverseSequenceOptionsT + def __init__(self): + self.seqDim = 0 # type: int + self.batchDim = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + reverseSequenceOptions = ReverseSequenceOptions() + reverseSequenceOptions.Init(buf, pos) + return cls.InitFromObj(reverseSequenceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, reverseSequenceOptions): + x = ReverseSequenceOptionsT() + x._UnPack(reverseSequenceOptions) + return x + + # ReverseSequenceOptionsT + def _UnPack(self, reverseSequenceOptions): + if reverseSequenceOptions is None: + return + self.seqDim = 
reverseSequenceOptions.SeqDim() + self.batchDim = reverseSequenceOptions.BatchDim() + + # ReverseSequenceOptionsT + def Pack(self, builder): + ReverseSequenceOptionsStart(builder) + ReverseSequenceOptionsAddSeqDim(builder, self.seqDim) + ReverseSequenceOptionsAddBatchDim(builder, self.batchDim) + reverseSequenceOptions = ReverseSequenceOptionsEnd(builder) + return reverseSequenceOptions + + +class MatrixDiagOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MatrixDiagOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMatrixDiagOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MatrixDiagOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def MatrixDiagOptionsStart(builder): + builder.StartObject(0) + +def MatrixDiagOptionsEnd(builder): + return builder.EndObject() + + + +class MatrixDiagOptionsT(object): + + # MatrixDiagOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + matrixDiagOptions = MatrixDiagOptions() + matrixDiagOptions.Init(buf, pos) + return cls.InitFromObj(matrixDiagOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, matrixDiagOptions): + x = MatrixDiagOptionsT() + x._UnPack(matrixDiagOptions) + return x + + # MatrixDiagOptionsT + def _UnPack(self, matrixDiagOptions): + if matrixDiagOptions is None: + return + + # MatrixDiagOptionsT + def Pack(self, builder): + MatrixDiagOptionsStart(builder) + 
matrixDiagOptions = MatrixDiagOptionsEnd(builder) + return matrixDiagOptions + + +class QuantizeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = QuantizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsQuantizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # QuantizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def QuantizeOptionsStart(builder): + builder.StartObject(0) + +def QuantizeOptionsEnd(builder): + return builder.EndObject() + + + +class QuantizeOptionsT(object): + + # QuantizeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + quantizeOptions = QuantizeOptions() + quantizeOptions.Init(buf, pos) + return cls.InitFromObj(quantizeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, quantizeOptions): + x = QuantizeOptionsT() + x._UnPack(quantizeOptions) + return x + + # QuantizeOptionsT + def _UnPack(self, quantizeOptions): + if quantizeOptions is None: + return + + # QuantizeOptionsT + def Pack(self, builder): + QuantizeOptionsStart(builder) + quantizeOptions = QuantizeOptionsEnd(builder) + return quantizeOptions + + +class MatrixSetDiagOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MatrixSetDiagOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsMatrixSetDiagOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MatrixSetDiagOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def MatrixSetDiagOptionsStart(builder): + builder.StartObject(0) + +def MatrixSetDiagOptionsEnd(builder): + return builder.EndObject() + + + +class MatrixSetDiagOptionsT(object): + + # MatrixSetDiagOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + matrixSetDiagOptions = MatrixSetDiagOptions() + matrixSetDiagOptions.Init(buf, pos) + return cls.InitFromObj(matrixSetDiagOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, matrixSetDiagOptions): + x = MatrixSetDiagOptionsT() + x._UnPack(matrixSetDiagOptions) + return x + + # MatrixSetDiagOptionsT + def _UnPack(self, matrixSetDiagOptions): + if matrixSetDiagOptions is None: + return + + # MatrixSetDiagOptionsT + def Pack(self, builder): + MatrixSetDiagOptionsStart(builder) + matrixSetDiagOptions = MatrixSetDiagOptionsEnd(builder) + return matrixSetDiagOptions + + +class IfOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = IfOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsIfOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # IfOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # IfOptions + def ThenSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # IfOptions + def ElseSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def IfOptionsStart(builder): + builder.StartObject(2) + +def IfOptionsAddThenSubgraphIndex(builder, thenSubgraphIndex): + builder.PrependInt32Slot(0, thenSubgraphIndex, 0) + +def IfOptionsAddElseSubgraphIndex(builder, elseSubgraphIndex): + builder.PrependInt32Slot(1, elseSubgraphIndex, 0) + +def IfOptionsEnd(builder): + return builder.EndObject() + + + +class IfOptionsT(object): + + # IfOptionsT + def __init__(self): + self.thenSubgraphIndex = 0 # type: int + self.elseSubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + ifOptions = IfOptions() + ifOptions.Init(buf, pos) + return cls.InitFromObj(ifOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, ifOptions): + x = IfOptionsT() + x._UnPack(ifOptions) + return x + + # IfOptionsT + def _UnPack(self, ifOptions): + if ifOptions is None: + return + self.thenSubgraphIndex = ifOptions.ThenSubgraphIndex() + self.elseSubgraphIndex = ifOptions.ElseSubgraphIndex() + + # IfOptionsT + def Pack(self, builder): + IfOptionsStart(builder) + 
IfOptionsAddThenSubgraphIndex(builder, self.thenSubgraphIndex) + IfOptionsAddElseSubgraphIndex(builder, self.elseSubgraphIndex) + ifOptions = IfOptionsEnd(builder) + return ifOptions + + +class CallOnceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CallOnceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCallOnceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def CallOnceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CallOnceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CallOnceOptions + def InitSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def CallOnceOptionsStart(builder): + builder.StartObject(1) + +def CallOnceOptionsAddInitSubgraphIndex(builder, initSubgraphIndex): + builder.PrependInt32Slot(0, initSubgraphIndex, 0) + +def CallOnceOptionsEnd(builder): + return builder.EndObject() + + + +class CallOnceOptionsT(object): + + # CallOnceOptionsT + def __init__(self): + self.initSubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + callOnceOptions = CallOnceOptions() + callOnceOptions.Init(buf, pos) + return cls.InitFromObj(callOnceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, callOnceOptions): + x = CallOnceOptionsT() + x._UnPack(callOnceOptions) + return x + + # CallOnceOptionsT + def _UnPack(self, 
callOnceOptions): + if callOnceOptions is None: + return + self.initSubgraphIndex = callOnceOptions.InitSubgraphIndex() + + # CallOnceOptionsT + def Pack(self, builder): + CallOnceOptionsStart(builder) + CallOnceOptionsAddInitSubgraphIndex(builder, self.initSubgraphIndex) + callOnceOptions = CallOnceOptionsEnd(builder) + return callOnceOptions + + +class WhileOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = WhileOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsWhileOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # WhileOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # WhileOptions + def CondSubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # WhileOptions + def BodySubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def WhileOptionsStart(builder): + builder.StartObject(2) + +def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): + builder.PrependInt32Slot(0, condSubgraphIndex, 0) + +def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): + builder.PrependInt32Slot(1, bodySubgraphIndex, 0) + +def WhileOptionsEnd(builder): + return builder.EndObject() + + + +class WhileOptionsT(object): + + # WhileOptionsT + def __init__(self): + self.condSubgraphIndex = 0 # type: int + 
self.bodySubgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + whileOptions = WhileOptions() + whileOptions.Init(buf, pos) + return cls.InitFromObj(whileOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, whileOptions): + x = WhileOptionsT() + x._UnPack(whileOptions) + return x + + # WhileOptionsT + def _UnPack(self, whileOptions): + if whileOptions is None: + return + self.condSubgraphIndex = whileOptions.CondSubgraphIndex() + self.bodySubgraphIndex = whileOptions.BodySubgraphIndex() + + # WhileOptionsT + def Pack(self, builder): + WhileOptionsStart(builder) + WhileOptionsAddCondSubgraphIndex(builder, self.condSubgraphIndex) + WhileOptionsAddBodySubgraphIndex(builder, self.bodySubgraphIndex) + whileOptions = WhileOptionsEnd(builder) + return whileOptions + + +class NonMaxSuppressionV4Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NonMaxSuppressionV4Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNonMaxSuppressionV4Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NonMaxSuppressionV4Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NonMaxSuppressionV4OptionsStart(builder): + builder.StartObject(0) + +def NonMaxSuppressionV4OptionsEnd(builder): + return builder.EndObject() + + + +class NonMaxSuppressionV4OptionsT(object): + + # NonMaxSuppressionV4OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + nonMaxSuppressionV4Options = NonMaxSuppressionV4Options() + nonMaxSuppressionV4Options.Init(buf, pos) + return cls.InitFromObj(nonMaxSuppressionV4Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, nonMaxSuppressionV4Options): + x = NonMaxSuppressionV4OptionsT() + x._UnPack(nonMaxSuppressionV4Options) + return x + + # NonMaxSuppressionV4OptionsT + def _UnPack(self, nonMaxSuppressionV4Options): + if nonMaxSuppressionV4Options is None: + return + + # NonMaxSuppressionV4OptionsT + def Pack(self, builder): + NonMaxSuppressionV4OptionsStart(builder) + nonMaxSuppressionV4Options = NonMaxSuppressionV4OptionsEnd(builder) + return nonMaxSuppressionV4Options + + +class NonMaxSuppressionV5Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NonMaxSuppressionV5Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NonMaxSuppressionV5Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NonMaxSuppressionV5OptionsStart(builder): + builder.StartObject(0) + +def NonMaxSuppressionV5OptionsEnd(builder): + return builder.EndObject() + + + +class NonMaxSuppressionV5OptionsT(object): + + # NonMaxSuppressionV5OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + nonMaxSuppressionV5Options = NonMaxSuppressionV5Options() + nonMaxSuppressionV5Options.Init(buf, pos) + return cls.InitFromObj(nonMaxSuppressionV5Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, nonMaxSuppressionV5Options): + x = NonMaxSuppressionV5OptionsT() + x._UnPack(nonMaxSuppressionV5Options) + return x + + # NonMaxSuppressionV5OptionsT + def _UnPack(self, nonMaxSuppressionV5Options): + if nonMaxSuppressionV5Options is None: + return + + # NonMaxSuppressionV5OptionsT + def Pack(self, builder): + NonMaxSuppressionV5OptionsStart(builder) + nonMaxSuppressionV5Options = NonMaxSuppressionV5OptionsEnd(builder) + return nonMaxSuppressionV5Options + + +class ScatterNdOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ScatterNdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsScatterNdOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ScatterNdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ScatterNdOptionsStart(builder): + builder.StartObject(0) + +def ScatterNdOptionsEnd(builder): + return builder.EndObject() + + + +class ScatterNdOptionsT(object): + + # ScatterNdOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + scatterNdOptions = ScatterNdOptions() + scatterNdOptions.Init(buf, pos) + return cls.InitFromObj(scatterNdOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, scatterNdOptions): + x = ScatterNdOptionsT() + x._UnPack(scatterNdOptions) + return x + + # ScatterNdOptionsT + def _UnPack(self, scatterNdOptions): + if scatterNdOptions is None: + return + + # ScatterNdOptionsT + def Pack(self, builder): + ScatterNdOptionsStart(builder) + scatterNdOptions = ScatterNdOptionsEnd(builder) + return scatterNdOptions + + +class SelectV2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SelectV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSelectV2Options(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SelectV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SelectV2OptionsStart(builder): + builder.StartObject(0) + +def SelectV2OptionsEnd(builder): + return builder.EndObject() + + + +class SelectV2OptionsT(object): + + # SelectV2OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + selectV2Options = SelectV2Options() + selectV2Options.Init(buf, pos) + return cls.InitFromObj(selectV2Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, selectV2Options): + x = SelectV2OptionsT() + x._UnPack(selectV2Options) + return x + + # SelectV2OptionsT + def _UnPack(self, selectV2Options): + if selectV2Options is None: + return + + # SelectV2OptionsT + def Pack(self, builder): + SelectV2OptionsStart(builder) + selectV2Options = SelectV2OptionsEnd(builder) + return selectV2Options + + +class DensifyOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DensifyOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDensifyOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DensifyOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def DensifyOptionsStart(builder): + builder.StartObject(0) + +def DensifyOptionsEnd(builder): + return builder.EndObject() + + + +class DensifyOptionsT(object): + + # DensifyOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + densifyOptions = DensifyOptions() + densifyOptions.Init(buf, pos) + return cls.InitFromObj(densifyOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, densifyOptions): + x = DensifyOptionsT() + x._UnPack(densifyOptions) + return x + + # DensifyOptionsT + def _UnPack(self, densifyOptions): + if densifyOptions is None: + return + + # DensifyOptionsT + def Pack(self, builder): + DensifyOptionsStart(builder) + densifyOptions = DensifyOptionsEnd(builder) + return densifyOptions + + +class SegmentSumOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SegmentSumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSegmentSumOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SegmentSumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SegmentSumOptionsStart(builder): + builder.StartObject(0) + +def SegmentSumOptionsEnd(builder): + return builder.EndObject() + + + +class SegmentSumOptionsT(object): + + # SegmentSumOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + segmentSumOptions = SegmentSumOptions() + segmentSumOptions.Init(buf, pos) + return cls.InitFromObj(segmentSumOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, segmentSumOptions): + x = SegmentSumOptionsT() + x._UnPack(segmentSumOptions) + return x + + # SegmentSumOptionsT + def _UnPack(self, segmentSumOptions): + if segmentSumOptions is None: + return + + # SegmentSumOptionsT + def Pack(self, builder): + SegmentSumOptionsStart(builder) + segmentSumOptions = SegmentSumOptionsEnd(builder) + return segmentSumOptions + + +class BatchMatMulOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BatchMatMulOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBatchMatMulOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BatchMatMulOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BatchMatMulOptions + def AdjX(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # BatchMatMulOptions + def AdjY(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # BatchMatMulOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def BatchMatMulOptionsStart(builder): + builder.StartObject(3) + +def BatchMatMulOptionsAddAdjX(builder, adjX): + builder.PrependBoolSlot(0, adjX, 0) + +def BatchMatMulOptionsAddAdjY(builder, adjY): + builder.PrependBoolSlot(1, adjY, 0) + +def BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): + builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) + +def BatchMatMulOptionsEnd(builder): + return builder.EndObject() + + + +class BatchMatMulOptionsT(object): + + # BatchMatMulOptionsT + def __init__(self): + self.adjX = False # type: bool + self.adjY = False # type: bool + self.asymmetricQuantizeInputs = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + batchMatMulOptions = BatchMatMulOptions() + batchMatMulOptions.Init(buf, pos) + return cls.InitFromObj(batchMatMulOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, batchMatMulOptions): + x = BatchMatMulOptionsT() + x._UnPack(batchMatMulOptions) + return x + + # BatchMatMulOptionsT + def _UnPack(self, batchMatMulOptions): + if batchMatMulOptions is None: + return + self.adjX = batchMatMulOptions.AdjX() + self.adjY = batchMatMulOptions.AdjY() + self.asymmetricQuantizeInputs = batchMatMulOptions.AsymmetricQuantizeInputs() + + # BatchMatMulOptionsT + def Pack(self, builder): + BatchMatMulOptionsStart(builder) + BatchMatMulOptionsAddAdjX(builder, self.adjX) + BatchMatMulOptionsAddAdjY(builder, self.adjY) + BatchMatMulOptionsAddAsymmetricQuantizeInputs(builder, self.asymmetricQuantizeInputs) + batchMatMulOptions = BatchMatMulOptionsEnd(builder) + return batchMatMulOptions + + +class CumsumOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CumsumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsCumsumOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def CumsumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CumsumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CumsumOptions + def Exclusive(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # CumsumOptions + def Reverse(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def CumsumOptionsStart(builder): + builder.StartObject(2) + +def CumsumOptionsAddExclusive(builder, exclusive): + builder.PrependBoolSlot(0, exclusive, 0) + +def CumsumOptionsAddReverse(builder, reverse): + builder.PrependBoolSlot(1, reverse, 0) + +def CumsumOptionsEnd(builder): + return builder.EndObject() + + + +class CumsumOptionsT(object): + + # CumsumOptionsT + def __init__(self): + self.exclusive = False # type: bool + self.reverse = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + cumsumOptions = CumsumOptions() + cumsumOptions.Init(buf, pos) + return cls.InitFromObj(cumsumOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, cumsumOptions): + x = CumsumOptionsT() + x._UnPack(cumsumOptions) + return x + + # CumsumOptionsT + def _UnPack(self, cumsumOptions): + if cumsumOptions is None: + return + self.exclusive = cumsumOptions.Exclusive() + self.reverse = cumsumOptions.Reverse() + + # CumsumOptionsT + def Pack(self, builder): + CumsumOptionsStart(builder) + 
CumsumOptionsAddExclusive(builder, self.exclusive) + CumsumOptionsAddReverse(builder, self.reverse) + cumsumOptions = CumsumOptionsEnd(builder) + return cumsumOptions + + +class BroadcastToOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BroadcastToOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBroadcastToOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BroadcastToOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def BroadcastToOptionsStart(builder): + builder.StartObject(0) + +def BroadcastToOptionsEnd(builder): + return builder.EndObject() + + + +class BroadcastToOptionsT(object): + + # BroadcastToOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + broadcastToOptions = BroadcastToOptions() + broadcastToOptions.Init(buf, pos) + return cls.InitFromObj(broadcastToOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, broadcastToOptions): + x = BroadcastToOptionsT() + x._UnPack(broadcastToOptions) + return x + + # BroadcastToOptionsT + def _UnPack(self, broadcastToOptions): + if broadcastToOptions is None: + return + + # BroadcastToOptionsT + def Pack(self, builder): + BroadcastToOptionsStart(builder) + broadcastToOptions = BroadcastToOptionsEnd(builder) + return broadcastToOptions + + +class Rfft2dOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Rfft2dOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRfft2dOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def Rfft2dOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Rfft2dOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def Rfft2dOptionsStart(builder): + builder.StartObject(0) + +def Rfft2dOptionsEnd(builder): + return builder.EndObject() + + + +class Rfft2dOptionsT(object): + + # Rfft2dOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + rfft2dOptions = Rfft2dOptions() + rfft2dOptions.Init(buf, pos) + return cls.InitFromObj(rfft2dOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, rfft2dOptions): + x = Rfft2dOptionsT() + x._UnPack(rfft2dOptions) + return x + + # Rfft2dOptionsT + def _UnPack(self, rfft2dOptions): + if rfft2dOptions is None: + return + + # Rfft2dOptionsT + def Pack(self, builder): + Rfft2dOptionsStart(builder) + rfft2dOptions = Rfft2dOptionsEnd(builder) + return rfft2dOptions + + +class HashtableOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def HashtableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HashtableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HashtableOptions + def TableId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # HashtableOptions + def KeyDtype(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # HashtableOptions + def ValueDtype(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def HashtableOptionsStart(builder): + builder.StartObject(3) + +def HashtableOptionsAddTableId(builder, tableId): + builder.PrependInt32Slot(0, tableId, 0) + +def HashtableOptionsAddKeyDtype(builder, keyDtype): + builder.PrependInt8Slot(1, keyDtype, 0) + +def HashtableOptionsAddValueDtype(builder, valueDtype): + builder.PrependInt8Slot(2, valueDtype, 0) + +def HashtableOptionsEnd(builder): + return builder.EndObject() + + + +class HashtableOptionsT(object): + + # HashtableOptionsT + def __init__(self): + self.tableId = 0 # type: int + self.keyDtype = 0 # type: int + self.valueDtype = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + hashtableOptions = HashtableOptions() + hashtableOptions.Init(buf, pos) + return cls.InitFromObj(hashtableOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def 
InitFromObj(cls, hashtableOptions): + x = HashtableOptionsT() + x._UnPack(hashtableOptions) + return x + + # HashtableOptionsT + def _UnPack(self, hashtableOptions): + if hashtableOptions is None: + return + self.tableId = hashtableOptions.TableId() + self.keyDtype = hashtableOptions.KeyDtype() + self.valueDtype = hashtableOptions.ValueDtype() + + # HashtableOptionsT + def Pack(self, builder): + HashtableOptionsStart(builder) + HashtableOptionsAddTableId(builder, self.tableId) + HashtableOptionsAddKeyDtype(builder, self.keyDtype) + HashtableOptionsAddValueDtype(builder, self.valueDtype) + hashtableOptions = HashtableOptionsEnd(builder) + return hashtableOptions + + +class HashtableFindOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableFindOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableFindOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def HashtableFindOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HashtableFindOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def HashtableFindOptionsStart(builder): + builder.StartObject(0) + +def HashtableFindOptionsEnd(builder): + return builder.EndObject() + + + +class HashtableFindOptionsT(object): + + # HashtableFindOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + hashtableFindOptions = HashtableFindOptions() + hashtableFindOptions.Init(buf, pos) + return cls.InitFromObj(hashtableFindOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, hashtableFindOptions): + x = HashtableFindOptionsT() + x._UnPack(hashtableFindOptions) + return x + + # HashtableFindOptionsT + def _UnPack(self, hashtableFindOptions): + if hashtableFindOptions is None: + return + + # HashtableFindOptionsT + def Pack(self, builder): + HashtableFindOptionsStart(builder) + hashtableFindOptions = HashtableFindOptionsEnd(builder) + return hashtableFindOptions + + +class HashtableImportOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableImportOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableImportOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def HashtableImportOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HashtableImportOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def HashtableImportOptionsStart(builder): + builder.StartObject(0) + +def HashtableImportOptionsEnd(builder): + return builder.EndObject() + + + +class HashtableImportOptionsT(object): + + # HashtableImportOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + hashtableImportOptions = HashtableImportOptions() + hashtableImportOptions.Init(buf, pos) + return cls.InitFromObj(hashtableImportOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, hashtableImportOptions): + x = HashtableImportOptionsT() + x._UnPack(hashtableImportOptions) + return x + + # HashtableImportOptionsT + def _UnPack(self, hashtableImportOptions): + if hashtableImportOptions is None: + return + + # HashtableImportOptionsT + def Pack(self, builder): + HashtableImportOptionsStart(builder) + hashtableImportOptions = HashtableImportOptionsEnd(builder) + return hashtableImportOptions + + +class HashtableSizeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HashtableSizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHashtableSizeOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def HashtableSizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HashtableSizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def HashtableSizeOptionsStart(builder): + builder.StartObject(0) + +def HashtableSizeOptionsEnd(builder): + return builder.EndObject() + + + +class HashtableSizeOptionsT(object): + + # HashtableSizeOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + hashtableSizeOptions = HashtableSizeOptions() + hashtableSizeOptions.Init(buf, pos) + return cls.InitFromObj(hashtableSizeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, hashtableSizeOptions): + x = HashtableSizeOptionsT() + x._UnPack(hashtableSizeOptions) + return x + + # HashtableSizeOptionsT + def _UnPack(self, hashtableSizeOptions): + if hashtableSizeOptions is None: + return + + # HashtableSizeOptionsT + def Pack(self, builder): + HashtableSizeOptionsStart(builder) + hashtableSizeOptions = HashtableSizeOptionsEnd(builder) + return hashtableSizeOptions + + +class VarHandleOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = VarHandleOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsVarHandleOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # VarHandleOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # VarHandleOptions + def Container(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # VarHandleOptions + def SharedName(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + +def VarHandleOptionsStart(builder): + builder.StartObject(2) + +def VarHandleOptionsAddContainer(builder, container): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(container), 0) + +def VarHandleOptionsAddSharedName(builder, sharedName): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(sharedName), 0) + +def VarHandleOptionsEnd(builder): + return builder.EndObject() + + + +class VarHandleOptionsT(object): + + # VarHandleOptionsT + def __init__(self): + self.container = None # type: str + self.sharedName = None # type: str + + @classmethod + def InitFromBuf(cls, buf, pos): + varHandleOptions = VarHandleOptions() + varHandleOptions.Init(buf, pos) + return cls.InitFromObj(varHandleOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, varHandleOptions): + x = VarHandleOptionsT() + x._UnPack(varHandleOptions) + return x + + # VarHandleOptionsT + def _UnPack(self, varHandleOptions): + if varHandleOptions is None: + return + self.container = varHandleOptions.Container() + self.sharedName = 
varHandleOptions.SharedName() + + # VarHandleOptionsT + def Pack(self, builder): + if self.container is not None: + container = builder.CreateString(self.container) + if self.sharedName is not None: + sharedName = builder.CreateString(self.sharedName) + VarHandleOptionsStart(builder) + if self.container is not None: + VarHandleOptionsAddContainer(builder, container) + if self.sharedName is not None: + VarHandleOptionsAddSharedName(builder, sharedName) + varHandleOptions = VarHandleOptionsEnd(builder) + return varHandleOptions + + +class ReadVariableOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReadVariableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReadVariableOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReadVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReadVariableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ReadVariableOptionsStart(builder): + builder.StartObject(0) + +def ReadVariableOptionsEnd(builder): + return builder.EndObject() + + + +class ReadVariableOptionsT(object): + + # ReadVariableOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + readVariableOptions = ReadVariableOptions() + readVariableOptions.Init(buf, pos) + return cls.InitFromObj(readVariableOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, readVariableOptions): + x = ReadVariableOptionsT() + x._UnPack(readVariableOptions) + return x + + # 
ReadVariableOptionsT + def _UnPack(self, readVariableOptions): + if readVariableOptions is None: + return + + # ReadVariableOptionsT + def Pack(self, builder): + ReadVariableOptionsStart(builder) + readVariableOptions = ReadVariableOptionsEnd(builder) + return readVariableOptions + + +class AssignVariableOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = AssignVariableOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsAssignVariableOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def AssignVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # AssignVariableOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def AssignVariableOptionsStart(builder): + builder.StartObject(0) + +def AssignVariableOptionsEnd(builder): + return builder.EndObject() + + + +class AssignVariableOptionsT(object): + + # AssignVariableOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + assignVariableOptions = AssignVariableOptions() + assignVariableOptions.Init(buf, pos) + return cls.InitFromObj(assignVariableOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, assignVariableOptions): + x = AssignVariableOptionsT() + x._UnPack(assignVariableOptions) + return x + + # AssignVariableOptionsT + def _UnPack(self, assignVariableOptions): + if assignVariableOptions is None: + return + + # AssignVariableOptionsT + def Pack(self, builder): + AssignVariableOptionsStart(builder) + assignVariableOptions = 
AssignVariableOptionsEnd(builder) + return assignVariableOptions + + +class RandomOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RandomOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRandomOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def RandomOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # RandomOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # RandomOptions + def Seed(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + + # RandomOptions + def Seed2(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos) + return 0 + +def RandomOptionsStart(builder): + builder.StartObject(2) + +def RandomOptionsAddSeed(builder, seed): + builder.PrependInt64Slot(0, seed, 0) + +def RandomOptionsAddSeed2(builder, seed2): + builder.PrependInt64Slot(1, seed2, 0) + +def RandomOptionsEnd(builder): + return builder.EndObject() + + + +class RandomOptionsT(object): + + # RandomOptionsT + def __init__(self): + self.seed = 0 # type: int + self.seed2 = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + randomOptions = RandomOptions() + randomOptions.Init(buf, pos) + return cls.InitFromObj(randomOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, 
randomOptions): + x = RandomOptionsT() + x._UnPack(randomOptions) + return x + + # RandomOptionsT + def _UnPack(self, randomOptions): + if randomOptions is None: + return + self.seed = randomOptions.Seed() + self.seed2 = randomOptions.Seed2() + + # RandomOptionsT + def Pack(self, builder): + RandomOptionsStart(builder) + RandomOptionsAddSeed(builder, self.seed) + RandomOptionsAddSeed2(builder, self.seed2) + randomOptions = RandomOptionsEnd(builder) + return randomOptions + + +class BucketizeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BucketizeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBucketizeOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BucketizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BucketizeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # BucketizeOptions + def Boundaries(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # BucketizeOptions + def BoundariesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # BucketizeOptions + def BoundariesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # BucketizeOptions + def BoundariesIsNone(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def BucketizeOptionsStart(builder): + builder.StartObject(1) + +def BucketizeOptionsAddBoundaries(builder, boundaries): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(boundaries), 0) + +def BucketizeOptionsStartBoundariesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def BucketizeOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class BucketizeOptionsT(object): + + # BucketizeOptionsT + def __init__(self): + self.boundaries = None # type: List[float] + + @classmethod + def InitFromBuf(cls, buf, pos): + bucketizeOptions = BucketizeOptions() + bucketizeOptions.Init(buf, pos) + return cls.InitFromObj(bucketizeOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, bucketizeOptions): + x = BucketizeOptionsT() + x._UnPack(bucketizeOptions) + return x + + # BucketizeOptionsT + def _UnPack(self, bucketizeOptions): + if bucketizeOptions is None: + return + if not bucketizeOptions.BoundariesIsNone(): + if np is None: + self.boundaries = [] + for i in range(bucketizeOptions.BoundariesLength()): + self.boundaries.append(bucketizeOptions.Boundaries(i)) + else: + self.boundaries = bucketizeOptions.BoundariesAsNumpy() + + # BucketizeOptionsT + def Pack(self, builder): + if self.boundaries is not None: + if np is not None and type(self.boundaries) is np.ndarray: + boundaries = builder.CreateNumpyVector(self.boundaries) + else: + BucketizeOptionsStartBoundariesVector(builder, len(self.boundaries)) + for i in reversed(range(len(self.boundaries))): + builder.PrependFloat32(self.boundaries[i]) + boundaries = builder.EndVector() + BucketizeOptionsStart(builder) + if self.boundaries is not None: + 
BucketizeOptionsAddBoundaries(builder, boundaries) + bucketizeOptions = BucketizeOptionsEnd(builder) + return bucketizeOptions + + +class GeluOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = GeluOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsGeluOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def GeluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # GeluOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # GeluOptions + def Approximate(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def GeluOptionsStart(builder): + builder.StartObject(1) + +def GeluOptionsAddApproximate(builder, approximate): + builder.PrependBoolSlot(0, approximate, 0) + +def GeluOptionsEnd(builder): + return builder.EndObject() + + + +class GeluOptionsT(object): + + # GeluOptionsT + def __init__(self): + self.approximate = False # type: bool + + @classmethod + def InitFromBuf(cls, buf, pos): + geluOptions = GeluOptions() + geluOptions.Init(buf, pos) + return cls.InitFromObj(geluOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, geluOptions): + x = GeluOptionsT() + x._UnPack(geluOptions) + return x + + # GeluOptionsT + def _UnPack(self, geluOptions): + if geluOptions is None: + return + self.approximate = geluOptions.Approximate() + + # GeluOptionsT + def Pack(self, builder): + 
GeluOptionsStart(builder) + GeluOptionsAddApproximate(builder, self.approximate) + geluOptions = GeluOptionsEnd(builder) + return geluOptions + + +class DynamicUpdateSliceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DynamicUpdateSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDynamicUpdateSliceOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DynamicUpdateSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DynamicUpdateSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def DynamicUpdateSliceOptionsStart(builder): + builder.StartObject(0) + +def DynamicUpdateSliceOptionsEnd(builder): + return builder.EndObject() + + + +class DynamicUpdateSliceOptionsT(object): + + # DynamicUpdateSliceOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + dynamicUpdateSliceOptions = DynamicUpdateSliceOptions() + dynamicUpdateSliceOptions.Init(buf, pos) + return cls.InitFromObj(dynamicUpdateSliceOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, dynamicUpdateSliceOptions): + x = DynamicUpdateSliceOptionsT() + x._UnPack(dynamicUpdateSliceOptions) + return x + + # DynamicUpdateSliceOptionsT + def _UnPack(self, dynamicUpdateSliceOptions): + if dynamicUpdateSliceOptions is None: + return + + # DynamicUpdateSliceOptionsT + def Pack(self, builder): + DynamicUpdateSliceOptionsStart(builder) + dynamicUpdateSliceOptions = DynamicUpdateSliceOptionsEnd(builder) + return 
dynamicUpdateSliceOptions + + +class UnsortedSegmentProdOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentProdOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentProdOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnsortedSegmentProdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnsortedSegmentProdOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def UnsortedSegmentProdOptionsStart(builder): + builder.StartObject(0) + +def UnsortedSegmentProdOptionsEnd(builder): + return builder.EndObject() + + + +class UnsortedSegmentProdOptionsT(object): + + # UnsortedSegmentProdOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + unsortedSegmentProdOptions = UnsortedSegmentProdOptions() + unsortedSegmentProdOptions.Init(buf, pos) + return cls.InitFromObj(unsortedSegmentProdOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unsortedSegmentProdOptions): + x = UnsortedSegmentProdOptionsT() + x._UnPack(unsortedSegmentProdOptions) + return x + + # UnsortedSegmentProdOptionsT + def _UnPack(self, unsortedSegmentProdOptions): + if unsortedSegmentProdOptions is None: + return + + # UnsortedSegmentProdOptionsT + def Pack(self, builder): + UnsortedSegmentProdOptionsStart(builder) + unsortedSegmentProdOptions = UnsortedSegmentProdOptionsEnd(builder) + return unsortedSegmentProdOptions + + +class UnsortedSegmentMaxOptions(object): + __slots__ = ['_tab'] + 
+ @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentMaxOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentMaxOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnsortedSegmentMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnsortedSegmentMaxOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def UnsortedSegmentMaxOptionsStart(builder): + builder.StartObject(0) + +def UnsortedSegmentMaxOptionsEnd(builder): + return builder.EndObject() + + + +class UnsortedSegmentMaxOptionsT(object): + + # UnsortedSegmentMaxOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + unsortedSegmentMaxOptions = UnsortedSegmentMaxOptions() + unsortedSegmentMaxOptions.Init(buf, pos) + return cls.InitFromObj(unsortedSegmentMaxOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unsortedSegmentMaxOptions): + x = UnsortedSegmentMaxOptionsT() + x._UnPack(unsortedSegmentMaxOptions) + return x + + # UnsortedSegmentMaxOptionsT + def _UnPack(self, unsortedSegmentMaxOptions): + if unsortedSegmentMaxOptions is None: + return + + # UnsortedSegmentMaxOptionsT + def Pack(self, builder): + UnsortedSegmentMaxOptionsStart(builder) + unsortedSegmentMaxOptions = UnsortedSegmentMaxOptionsEnd(builder) + return unsortedSegmentMaxOptions + + +class UnsortedSegmentSumOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, 
offset) + x = UnsortedSegmentSumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentSumOptions(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnsortedSegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnsortedSegmentSumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def UnsortedSegmentSumOptionsStart(builder): + builder.StartObject(0) + +def UnsortedSegmentSumOptionsEnd(builder): + return builder.EndObject() + + + +class UnsortedSegmentSumOptionsT(object): + + # UnsortedSegmentSumOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + unsortedSegmentSumOptions = UnsortedSegmentSumOptions() + unsortedSegmentSumOptions.Init(buf, pos) + return cls.InitFromObj(unsortedSegmentSumOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unsortedSegmentSumOptions): + x = UnsortedSegmentSumOptionsT() + x._UnPack(unsortedSegmentSumOptions) + return x + + # UnsortedSegmentSumOptionsT + def _UnPack(self, unsortedSegmentSumOptions): + if unsortedSegmentSumOptions is None: + return + + # UnsortedSegmentSumOptionsT + def Pack(self, builder): + UnsortedSegmentSumOptionsStart(builder) + unsortedSegmentSumOptions = UnsortedSegmentSumOptionsEnd(builder) + return unsortedSegmentSumOptions + + +class ATan2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ATan2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsATan2Options(cls, buf, offset=0): + 
"""This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ATan2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ATan2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ATan2OptionsStart(builder): + builder.StartObject(0) + +def ATan2OptionsEnd(builder): + return builder.EndObject() + + + +class ATan2OptionsT(object): + + # ATan2OptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + atan2Options = ATan2Options() + atan2Options.Init(buf, pos) + return cls.InitFromObj(atan2Options) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, atan2Options): + x = ATan2OptionsT() + x._UnPack(atan2Options) + return x + + # ATan2OptionsT + def _UnPack(self, atan2Options): + if atan2Options is None: + return + + # ATan2OptionsT + def Pack(self, builder): + ATan2OptionsStart(builder) + atan2Options = ATan2OptionsEnd(builder) + return atan2Options + + +class UnsortedSegmentMinOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UnsortedSegmentMinOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUnsortedSegmentMinOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def UnsortedSegmentMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # UnsortedSegmentMinOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def UnsortedSegmentMinOptionsStart(builder): + builder.StartObject(0) + +def UnsortedSegmentMinOptionsEnd(builder): + return builder.EndObject() + + + +class UnsortedSegmentMinOptionsT(object): + + # UnsortedSegmentMinOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + unsortedSegmentMinOptions = UnsortedSegmentMinOptions() + unsortedSegmentMinOptions.Init(buf, pos) + return cls.InitFromObj(unsortedSegmentMinOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, unsortedSegmentMinOptions): + x = UnsortedSegmentMinOptionsT() + x._UnPack(unsortedSegmentMinOptions) + return x + + # UnsortedSegmentMinOptionsT + def _UnPack(self, unsortedSegmentMinOptions): + if unsortedSegmentMinOptions is None: + return + + # UnsortedSegmentMinOptionsT + def Pack(self, builder): + UnsortedSegmentMinOptionsStart(builder) + unsortedSegmentMinOptions = UnsortedSegmentMinOptionsEnd(builder) + return unsortedSegmentMinOptions + + +class SignOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SignOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSignOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SignOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SignOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SignOptionsStart(builder): + builder.StartObject(0) + +def SignOptionsEnd(builder): + return builder.EndObject() + + + +class SignOptionsT(object): + + # SignOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + signOptions = SignOptions() + signOptions.Init(buf, pos) + return cls.InitFromObj(signOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, signOptions): + x = SignOptionsT() + x._UnPack(signOptions) + return x + + # SignOptionsT + def _UnPack(self, signOptions): + if signOptions is None: + return + + # SignOptionsT + def Pack(self, builder): + SignOptionsStart(builder) + signOptions = SignOptionsEnd(builder) + return signOptions + + +class BitcastOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BitcastOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBitcastOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BitcastOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def BitcastOptionsStart(builder): + builder.StartObject(0) + +def BitcastOptionsEnd(builder): + return builder.EndObject() + + + +class BitcastOptionsT(object): + + # BitcastOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + bitcastOptions = BitcastOptions() + bitcastOptions.Init(buf, pos) + return cls.InitFromObj(bitcastOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, bitcastOptions): + x = BitcastOptionsT() + x._UnPack(bitcastOptions) + return x + + # BitcastOptionsT + def _UnPack(self, bitcastOptions): + if bitcastOptions is None: + return + + # BitcastOptionsT + def Pack(self, builder): + BitcastOptionsStart(builder) + bitcastOptions = BitcastOptionsEnd(builder) + return bitcastOptions + + +class BitwiseXorOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BitwiseXorOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsBitwiseXorOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BitwiseXorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BitwiseXorOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def BitwiseXorOptionsStart(builder): + builder.StartObject(0) + +def BitwiseXorOptionsEnd(builder): + return builder.EndObject() + + + +class BitwiseXorOptionsT(object): + + # BitwiseXorOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + bitwiseXorOptions = BitwiseXorOptions() + bitwiseXorOptions.Init(buf, pos) + return cls.InitFromObj(bitwiseXorOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, bitwiseXorOptions): + x = BitwiseXorOptionsT() + x._UnPack(bitwiseXorOptions) + return x + + # BitwiseXorOptionsT + def _UnPack(self, bitwiseXorOptions): + if bitwiseXorOptions is None: + return + + # BitwiseXorOptionsT + def Pack(self, builder): + BitwiseXorOptionsStart(builder) + bitwiseXorOptions = BitwiseXorOptionsEnd(builder) + return bitwiseXorOptions + + +class RightShiftOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = RightShiftOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsRightShiftOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def RightShiftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # RightShiftOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def RightShiftOptionsStart(builder): + builder.StartObject(0) + +def RightShiftOptionsEnd(builder): + return builder.EndObject() + + + +class RightShiftOptionsT(object): + + # RightShiftOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + rightShiftOptions = RightShiftOptions() + rightShiftOptions.Init(buf, pos) + return cls.InitFromObj(rightShiftOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, rightShiftOptions): + x = RightShiftOptionsT() + x._UnPack(rightShiftOptions) + return x + + # RightShiftOptionsT + def _UnPack(self, rightShiftOptions): + if rightShiftOptions is None: + return + + # RightShiftOptionsT + def Pack(self, builder): + RightShiftOptionsStart(builder) + rightShiftOptions = RightShiftOptionsEnd(builder) + return rightShiftOptions + + +class DilateOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DilateOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsDilateOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def DilateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DilateOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def DilateOptionsStart(builder): + builder.StartObject(0) + +def DilateOptionsEnd(builder): + return builder.EndObject() + + + +class DilateOptionsT(object): + + # DilateOptionsT + def __init__(self): + pass + + @classmethod + def InitFromBuf(cls, buf, pos): + dilateOptions = DilateOptions() + dilateOptions.Init(buf, pos) + return cls.InitFromObj(dilateOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, dilateOptions): + x = DilateOptionsT() + x._UnPack(dilateOptions) + return x + + # DilateOptionsT + def _UnPack(self, dilateOptions): + if dilateOptions is None: + return + + # DilateOptionsT + def Pack(self, builder): + DilateOptionsStart(builder) + dilateOptions = DilateOptionsEnd(builder) + return dilateOptions + + +class ReduceWindowOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReduceWindowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsReduceWindowOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReduceWindowOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReduceWindowOptions + def ReduceFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def ReduceWindowOptionsStart(builder): + builder.StartObject(1) + +def ReduceWindowOptionsAddReduceFunction(builder, reduceFunction): + builder.PrependInt32Slot(0, reduceFunction, 0) + +def ReduceWindowOptionsEnd(builder): + return builder.EndObject() + + + +class ReduceWindowOptionsT(object): + + # ReduceWindowOptionsT + def __init__(self): + self.reduceFunction = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + reduceWindowOptions = ReduceWindowOptions() + reduceWindowOptions.Init(buf, pos) + return cls.InitFromObj(reduceWindowOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, reduceWindowOptions): + x = ReduceWindowOptionsT() + x._UnPack(reduceWindowOptions) + return x + + # ReduceWindowOptionsT + def _UnPack(self, reduceWindowOptions): + if reduceWindowOptions is None: + return + self.reduceFunction = reduceWindowOptions.ReduceFunction() + + # ReduceWindowOptionsT + def Pack(self, builder): + ReduceWindowOptionsStart(builder) + ReduceWindowOptionsAddReduceFunction(builder, self.reduceFunction) + reduceWindowOptions = ReduceWindowOptionsEnd(builder) + return reduceWindowOptions + + +class OperatorCode(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): 
        # NOTE(review): this chunk begins inside the flatc-generated `OperatorCode`
        # table reader; the `class OperatorCode` header is above this chunk.
        # All code below is auto-generated FlatBuffers accessor/builder code for the
        # TFLite schema — do not hand-edit; regenerate from schema.fbs instead.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = OperatorCode()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsOperatorCode(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is ASCII "TFL3", the TFLite flatbuffer file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # OperatorCode
    def Init(self, buf, pos):
        # Attach this accessor object to table data at `pos` inside `buf`.
        self._tab = flatbuffers.table.Table(buf, pos)

    # OperatorCode
    def DeprecatedBuiltinCode(self):
        # vtable entry 4 (slot 0): int8, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # OperatorCode
    def CustomCode(self):
        # vtable entry 6 (slot 1): optional string; None when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # OperatorCode
    def Version(self):
        # vtable entry 8 (slot 2): int32, default 1.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 1

    # OperatorCode
    def BuiltinCode(self):
        # vtable entry 10 (slot 3): int32, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

# Builder helpers for serializing an OperatorCode table (4 slots).
def OperatorCodeStart(builder):
    builder.StartObject(4)

def OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode):
    builder.PrependInt8Slot(0, deprecatedBuiltinCode, 0)

def OperatorCodeAddCustomCode(builder, customCode):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0)

def OperatorCodeAddVersion(builder, version):
    builder.PrependInt32Slot(2, version, 1)

def OperatorCodeAddBuiltinCode(builder, builtinCode):
    builder.PrependInt32Slot(3, builtinCode, 0)

def OperatorCodeEnd(builder):
    return builder.EndObject()



class OperatorCodeT(object):
    """Mutable plain-Python object API mirror of the OperatorCode table."""

    # OperatorCodeT
    def __init__(self):
        self.deprecatedBuiltinCode = 0  # type: int
        self.customCode = None  # type: str
        self.version = 1  # type: int
        self.builtinCode = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        operatorCode = OperatorCode()
        operatorCode.Init(buf, pos)
        return cls.InitFromObj(operatorCode)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # Resolve the root-table offset prefix, then unpack from there.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, operatorCode):
        x = OperatorCodeT()
        x._UnPack(operatorCode)
        return x

    # OperatorCodeT
    def _UnPack(self, operatorCode):
        # Copy every field out of the flatbuffer accessor into this object.
        if operatorCode is None:
            return
        self.deprecatedBuiltinCode = operatorCode.DeprecatedBuiltinCode()
        self.customCode = operatorCode.CustomCode()
        self.version = operatorCode.Version()
        self.builtinCode = operatorCode.BuiltinCode()

    # OperatorCodeT
    def Pack(self, builder):
        # Strings/vectors must be created before OperatorCodeStart (flatbuffers
        # forbids nested object construction), hence the two-phase layout here.
        if self.customCode is not None:
            customCode = builder.CreateString(self.customCode)
        OperatorCodeStart(builder)
        OperatorCodeAddDeprecatedBuiltinCode(builder, self.deprecatedBuiltinCode)
        if self.customCode is not None:
            OperatorCodeAddCustomCode(builder, customCode)
        OperatorCodeAddVersion(builder, self.version)
        OperatorCodeAddBuiltinCode(builder, self.builtinCode)
        operatorCode = OperatorCodeEnd(builder)
        return operatorCode


class StableHLOCompositeOptions(object):
    """Read-only accessor for the StableHLOCompositeOptions flatbuffer table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StableHLOCompositeOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStableHLOCompositeOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StableHLOCompositeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # "TFL3" file identifier check.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StableHLOCompositeOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # StableHLOCompositeOptions
    def Name(self):
        # slot 0: optional string.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # StableHLOCompositeOptions
    def DecompositionSubgraphIndex(self):
        # slot 1: int32, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # StableHLOCompositeOptions
    def CompositeAttributes(self, j):
        # slot 2: [ubyte] vector, element j.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # StableHLOCompositeOptions
    def CompositeAttributesAsNumpy(self):
        # Zero-copy numpy view over the byte vector; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    # StableHLOCompositeOptions
    def CompositeAttributesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # StableHLOCompositeOptions
    def CompositeAttributesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # StableHLOCompositeOptions
    def CompositeAttributesFormat(self):
        # slot 3: int8, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # StableHLOCompositeOptions
    def Version(self):
        # slot 4: int32, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

# Builder helpers for serializing a StableHLOCompositeOptions table (5 slots).
def StableHLOCompositeOptionsStart(builder):
    builder.StartObject(5)

def StableHLOCompositeOptionsAddName(builder, name):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)

def StableHLOCompositeOptionsAddDecompositionSubgraphIndex(builder, decompositionSubgraphIndex):
    builder.PrependInt32Slot(1, decompositionSubgraphIndex, 0)

def StableHLOCompositeOptionsAddCompositeAttributes(builder, compositeAttributes):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(compositeAttributes), 0)

def StableHLOCompositeOptionsStartCompositeAttributesVector(builder, numElems):
    # 1-byte elements, 1-byte alignment.
    return builder.StartVector(1, numElems, 1)

def StableHLOCompositeOptionsAddCompositeAttributesFormat(builder, compositeAttributesFormat):
    builder.PrependInt8Slot(3, compositeAttributesFormat, 0)

def StableHLOCompositeOptionsAddVersion(builder, version):
    builder.PrependInt32Slot(4, version, 0)

def StableHLOCompositeOptionsEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class StableHLOCompositeOptionsT(object):
    """Mutable plain-Python object API mirror of StableHLOCompositeOptions."""

    # StableHLOCompositeOptionsT
    def __init__(self):
        self.name = None  # type: str
        self.decompositionSubgraphIndex = 0  # type: int
        self.compositeAttributes = None  # type: List[int]
        self.compositeAttributesFormat = 0  # type: int
        self.version = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        stableHlocompositeOptions = StableHLOCompositeOptions()
        stableHlocompositeOptions.Init(buf, pos)
        return cls.InitFromObj(stableHlocompositeOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stableHlocompositeOptions):
        x = StableHLOCompositeOptionsT()
        x._UnPack(stableHlocompositeOptions)
        return x

    # StableHLOCompositeOptionsT
    def _UnPack(self, stableHlocompositeOptions):
        if stableHlocompositeOptions is None:
            return
        self.name = stableHlocompositeOptions.Name()
        self.decompositionSubgraphIndex = stableHlocompositeOptions.DecompositionSubgraphIndex()
        if not stableHlocompositeOptions.CompositeAttributesIsNone():
            # `np` is the module-level numpy import (bound elsewhere in this
            # file); fall back to a plain list when numpy is unavailable.
            if np is None:
                self.compositeAttributes = []
                for i in range(stableHlocompositeOptions.CompositeAttributesLength()):
                    self.compositeAttributes.append(stableHlocompositeOptions.CompositeAttributes(i))
            else:
                self.compositeAttributes = stableHlocompositeOptions.CompositeAttributesAsNumpy()
        self.compositeAttributesFormat = stableHlocompositeOptions.CompositeAttributesFormat()
        self.version = stableHlocompositeOptions.Version()

    # StableHLOCompositeOptionsT
    def Pack(self, builder):
        # Create nested strings/vectors first, then build the table.
        if self.name is not None:
            name = builder.CreateString(self.name)
        if self.compositeAttributes is not None:
            if np is not None and type(self.compositeAttributes) is np.ndarray:
                compositeAttributes = builder.CreateNumpyVector(self.compositeAttributes)
            else:
                StableHLOCompositeOptionsStartCompositeAttributesVector(builder, len(self.compositeAttributes))
                # Flatbuffers vectors are written back-to-front.
                for i in reversed(range(len(self.compositeAttributes))):
                    builder.PrependUint8(self.compositeAttributes[i])
                compositeAttributes = builder.EndVector()
        StableHLOCompositeOptionsStart(builder)
        if self.name is not None:
            StableHLOCompositeOptionsAddName(builder, name)
        StableHLOCompositeOptionsAddDecompositionSubgraphIndex(builder, self.decompositionSubgraphIndex)
        if self.compositeAttributes is not None:
            StableHLOCompositeOptionsAddCompositeAttributes(builder, compositeAttributes)
        StableHLOCompositeOptionsAddCompositeAttributesFormat(builder, self.compositeAttributesFormat)
        StableHLOCompositeOptionsAddVersion(builder, self.version)
        stableHlocompositeOptions = StableHLOCompositeOptionsEnd(builder)
        return stableHlocompositeOptions


class StablehloShiftLeftOptions(object):
    """Read-only accessor for the (field-less) StablehloShiftLeftOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = StablehloShiftLeftOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsStablehloShiftLeftOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def StablehloShiftLeftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # StablehloShiftLeftOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def StablehloShiftLeftOptionsStart(builder):
    # Table with zero slots.
    builder.StartObject(0)

def StablehloShiftLeftOptionsEnd(builder):
    return builder.EndObject()



class StablehloShiftLeftOptionsT(object):
    """Mutable object API mirror of StablehloShiftLeftOptions (no fields)."""

    # StablehloShiftLeftOptionsT
    def __init__(self):
        pass

    @classmethod
    def InitFromBuf(cls, buf, pos):
        stablehloShiftLeftOptions = StablehloShiftLeftOptions()
        stablehloShiftLeftOptions.Init(buf, pos)
        return cls.InitFromObj(stablehloShiftLeftOptions)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, stablehloShiftLeftOptions):
        x = StablehloShiftLeftOptionsT()
        x._UnPack(stablehloShiftLeftOptions)
        return x

    # StablehloShiftLeftOptionsT
    def _UnPack(self, stablehloShiftLeftOptions):
        if stablehloShiftLeftOptions is None:
            return

    # StablehloShiftLeftOptionsT
    def Pack(self, builder):
        StablehloShiftLeftOptionsStart(builder)
        stablehloShiftLeftOptions = StablehloShiftLeftOptionsEnd(builder)
        return stablehloShiftLeftOptions


class Operator(object):
    """Read-only accessor for the Operator flatbuffer table (one op in a subgraph)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Operator()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsOperator(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # Operator
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Operator
    def OpcodeIndex(self):
        # slot 0: uint32 index into the model's operator_codes vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

    # Operator
    def Inputs(self, j):
        # slot 1: [int32] vector, element j.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Operator
    def InputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Operator
    def InputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Operator
    def InputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # Operator
    def Outputs(self, j):
        # slot 2: [int32] vector, element j.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Operator
    def OutputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Operator
    def OutputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Operator
    def OutputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # Operator
    def BuiltinOptionsType(self):
        # slot 3: union discriminant (uint8) for BuiltinOptions.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0

    # Operator
    def BuiltinOptions(self):
        # slot 4: union payload; returns an untyped Table positioned at the
        # member — callers interpret it via BuiltinOptionsType().
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            from flatbuffers.table import Table
            obj = Table(bytearray(), 0)
            self._tab.Union(obj, o)
            return obj
        return None

    # Operator
    def CustomOptions(self, j):
        # slot 5: [ubyte] vector of opaque custom-op options.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # Operator
    def CustomOptionsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
        return 0

    # Operator
    def CustomOptionsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Operator
    def CustomOptionsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        return o == 0

    # Operator
    def CustomOptionsFormat(self):
        # slot 6: int8, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # Operator
    def MutatingVariableInputs(self, j):
        # slot 7: [bool] vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # Operator
    def MutatingVariableInputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
        return 0

    # Operator
    def MutatingVariableInputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Operator
    def MutatingVariableInputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        return o == 0

    # Operator
    def Intermediates(self, j):
        # slot 8: [int32] vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # Operator
    def IntermediatesAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # Operator
    def IntermediatesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # Operator
    def IntermediatesIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        return o == 0

    # Operator
    def LargeCustomOptionsOffset(self):
        # slot 9: uint64, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # Operator
    def LargeCustomOptionsSize(self):
        # slot 10: uint64, default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # Operator
    def BuiltinOptions2Type(self):
        # slot 11: union discriminant (uint8) for BuiltinOptions2.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0

    # Operator
    def BuiltinOptions2(self):
        # slot 12: second union payload (see BuiltinOptions2Type()).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
        if o != 0:
            from flatbuffers.table import Table
            obj = Table(bytearray(), 0)
            self._tab.Union(obj, o)
            return obj
        return None

    # Operator
    def DebugMetadataIndex(self):
        # slot 13: int32, default -1 (meaning: no debug metadata).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return -1

# Builder helpers for serializing an Operator table (14 slots).
def OperatorStart(builder):
    builder.StartObject(14)

def OperatorAddOpcodeIndex(builder, opcodeIndex):
    builder.PrependUint32Slot(0, opcodeIndex, 0)

def OperatorAddInputs(builder, inputs):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)

def OperatorStartInputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def OperatorAddOutputs(builder, outputs):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)

def OperatorStartOutputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def OperatorAddBuiltinOptionsType(builder, builtinOptionsType):
    builder.PrependUint8Slot(3, builtinOptionsType, 0)

def OperatorAddBuiltinOptions(builder, builtinOptions):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions), 0)

def OperatorAddCustomOptions(builder, customOptions):
    builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(customOptions), 0)

def OperatorStartCustomOptionsVector(builder, numElems):
    return builder.StartVector(1, numElems, 1)

def OperatorAddCustomOptionsFormat(builder, customOptionsFormat):
    builder.PrependInt8Slot(6, customOptionsFormat, 0)

def OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs):
    builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(mutatingVariableInputs), 0)

def OperatorStartMutatingVariableInputsVector(builder, numElems):
    return builder.StartVector(1, numElems, 1)

def OperatorAddIntermediates(builder, intermediates):
    builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(intermediates), 0)

def OperatorStartIntermediatesVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def OperatorAddLargeCustomOptionsOffset(builder, largeCustomOptionsOffset):
    builder.PrependUint64Slot(9, largeCustomOptionsOffset, 0)

def OperatorAddLargeCustomOptionsSize(builder, largeCustomOptionsSize):
    builder.PrependUint64Slot(10, largeCustomOptionsSize, 0)

def OperatorAddBuiltinOptions2Type(builder, builtinOptions2Type):
    builder.PrependUint8Slot(11, builtinOptions2Type, 0)

def OperatorAddBuiltinOptions2(builder, builtinOptions2):
    builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(builtinOptions2), 0)

def OperatorAddDebugMetadataIndex(builder, debugMetadataIndex):
    builder.PrependInt32Slot(13, debugMetadataIndex, -1)

def OperatorEnd(builder):
    return builder.EndObject()


try:
    from typing import List, Union
except:
    pass

class OperatorT(object):
    """Mutable plain-Python object API mirror of the Operator table."""

    # OperatorT
    def __init__(self):
        self.opcodeIndex = 0  # type: int
        self.inputs = None  # type: List[int]
        self.outputs = None  # type: List[int]
        self.builtinOptionsType = 0  # type: int
        self.builtinOptions = None  # type: Union[None, Conv2DOptionsT, DepthwiseConv2DOptionsT, ConcatEmbeddingsOptionsT, LSHProjectionOptionsT, Pool2DOptionsT, SVDFOptionsT, RNNOptionsT, FullyConnectedOptionsT, SoftmaxOptionsT, ConcatenationOptionsT, AddOptionsT, L2NormOptionsT, LocalResponseNormalizationOptionsT, LSTMOptionsT, ResizeBilinearOptionsT, CallOptionsT, ReshapeOptionsT, SkipGramOptionsT, SpaceToDepthOptionsT, EmbeddingLookupSparseOptionsT, MulOptionsT, PadOptionsT, GatherOptionsT, BatchToSpaceNDOptionsT, SpaceToBatchNDOptionsT, TransposeOptionsT, ReducerOptionsT, SubOptionsT, DivOptionsT, SqueezeOptionsT, SequenceRNNOptionsT, StridedSliceOptionsT, ExpOptionsT, TopKV2OptionsT, SplitOptionsT, LogSoftmaxOptionsT, CastOptionsT, DequantizeOptionsT, MaximumMinimumOptionsT, ArgMaxOptionsT, LessOptionsT, NegOptionsT, PadV2OptionsT, GreaterOptionsT, GreaterEqualOptionsT, LessEqualOptionsT, SelectOptionsT, SliceOptionsT, TransposeConvOptionsT, SparseToDenseOptionsT, TileOptionsT, ExpandDimsOptionsT, EqualOptionsT, NotEqualOptionsT, ShapeOptionsT, PowOptionsT, ArgMinOptionsT, FakeQuantOptionsT, PackOptionsT, LogicalOrOptionsT, OneHotOptionsT, LogicalAndOptionsT, LogicalNotOptionsT, UnpackOptionsT, FloorDivOptionsT, SquareOptionsT, ZerosLikeOptionsT, FillOptionsT, BidirectionalSequenceLSTMOptionsT, BidirectionalSequenceRNNOptionsT, UnidirectionalSequenceLSTMOptionsT, FloorModOptionsT, RangeOptionsT, ResizeNearestNeighborOptionsT, LeakyReluOptionsT, SquaredDifferenceOptionsT, MirrorPadOptionsT, AbsOptionsT, SplitVOptionsT, UniqueOptionsT, ReverseV2OptionsT, AddNOptionsT, GatherNdOptionsT, CosOptionsT, WhereOptionsT, RankOptionsT, ReverseSequenceOptionsT, MatrixDiagOptionsT, QuantizeOptionsT, MatrixSetDiagOptionsT, HardSwishOptionsT, IfOptionsT, WhileOptionsT, DepthToSpaceOptionsT, NonMaxSuppressionV4OptionsT, NonMaxSuppressionV5OptionsT, ScatterNdOptionsT, SelectV2OptionsT, DensifyOptionsT, SegmentSumOptionsT, BatchMatMulOptionsT, CumsumOptionsT, CallOnceOptionsT, BroadcastToOptionsT, Rfft2dOptionsT, Conv3DOptionsT, HashtableOptionsT, HashtableFindOptionsT, HashtableImportOptionsT, HashtableSizeOptionsT, VarHandleOptionsT, ReadVariableOptionsT, AssignVariableOptionsT, RandomOptionsT, BucketizeOptionsT, GeluOptionsT, DynamicUpdateSliceOptionsT, UnsortedSegmentProdOptionsT, UnsortedSegmentMaxOptionsT, UnsortedSegmentMinOptionsT, UnsortedSegmentSumOptionsT, ATan2OptionsT, SignOptionsT, BitcastOptionsT, BitwiseXorOptionsT, RightShiftOptionsT]
        self.customOptions = None  # type: List[int]
        self.customOptionsFormat = 0  # type: int
        self.mutatingVariableInputs = None  # type: List[bool]
        self.intermediates = None  # type: List[int]
        self.largeCustomOptionsOffset = 0  # type: int
        self.largeCustomOptionsSize = 0  # type: int
        self.builtinOptions2Type = 0  # type: int
        self.builtinOptions2 = None  # type: Union[None, StablehloConcatenateOptionsT, StablehloBroadcastInDimOptionsT, StablehloSliceOptionsT, StablehloConvolutionOptionsT, StablehloCustomCallOptionsT, StablehloReduceOptionsT, StablehloScatterOptionsT, StablehloCompareOptionsT, StablehloDynamicSliceOptionsT, StablehloPadOptionsT, StablehloIotaOptionsT, StablehloDotGeneralOptionsT, StablehloReduceWindowOptionsT, StablehloSortOptionsT, StablehloWhileOptionsT, StablehloGatherOptionsT, StablehloTransposeOptionsT, DilateOptionsT, StablehloRngBitGeneratorOptionsT, ReduceWindowOptionsT, StableHLOCompositeOptionsT, StablehloShiftLeftOptionsT]
        self.debugMetadataIndex = -1  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        operator = Operator()
        operator.Init(buf, pos)
        return cls.InitFromObj(operator)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, operator):
        x = OperatorT()
        x._UnPack(operator)
        return x

    # OperatorT
    def _UnPack(self, operator):
        if operator is None:
            return
        self.opcodeIndex = operator.OpcodeIndex()
        if not operator.InputsIsNone():
            if np is None:
                self.inputs = []
                for i in range(operator.InputsLength()):
                    self.inputs.append(operator.Inputs(i))
            else:
                self.inputs = operator.InputsAsNumpy()
        if not operator.OutputsIsNone():
            if np is None:
                self.outputs = []
                for i in range(operator.OutputsLength()):
                    self.outputs.append(operator.Outputs(i))
            else:
                self.outputs = operator.OutputsAsNumpy()
        self.builtinOptionsType = operator.BuiltinOptionsType()
        # BuiltinOptionsCreator / BuiltinOptions2Creator are union-dispatch
        # helpers defined elsewhere in this generated module.
        self.builtinOptions = BuiltinOptionsCreator(self.builtinOptionsType, operator.BuiltinOptions())
        if not operator.CustomOptionsIsNone():
            if np is None:
                self.customOptions = []
                for i in range(operator.CustomOptionsLength()):
                    self.customOptions.append(operator.CustomOptions(i))
            else:
                self.customOptions = operator.CustomOptionsAsNumpy()
        self.customOptionsFormat = operator.CustomOptionsFormat()
        if not operator.MutatingVariableInputsIsNone():
            if np is None:
                self.mutatingVariableInputs = []
                for i in range(operator.MutatingVariableInputsLength()):
                    self.mutatingVariableInputs.append(operator.MutatingVariableInputs(i))
            else:
                self.mutatingVariableInputs = operator.MutatingVariableInputsAsNumpy()
        if not operator.IntermediatesIsNone():
            if np is None:
                self.intermediates = []
                for i in range(operator.IntermediatesLength()):
                    self.intermediates.append(operator.Intermediates(i))
            else:
                self.intermediates = operator.IntermediatesAsNumpy()
        self.largeCustomOptionsOffset = operator.LargeCustomOptionsOffset()
        self.largeCustomOptionsSize = operator.LargeCustomOptionsSize()
        self.builtinOptions2Type = operator.BuiltinOptions2Type()
        self.builtinOptions2 = BuiltinOptions2Creator(self.builtinOptions2Type, operator.BuiltinOptions2())
        self.debugMetadataIndex = operator.DebugMetadataIndex()

    # OperatorT
    def Pack(self, builder):
        # Phase 1: serialize every nested vector/table before starting the
        # Operator table itself (flatbuffers requirement).
        if self.inputs is not None:
            if np is not None and type(self.inputs) is np.ndarray:
                inputs = builder.CreateNumpyVector(self.inputs)
            else:
                OperatorStartInputsVector(builder, len(self.inputs))
                for i in reversed(range(len(self.inputs))):
                    builder.PrependInt32(self.inputs[i])
                inputs = builder.EndVector()
        if self.outputs is not None:
            if np is not None and type(self.outputs) is np.ndarray:
                outputs = builder.CreateNumpyVector(self.outputs)
            else:
                OperatorStartOutputsVector(builder, len(self.outputs))
                for i in reversed(range(len(self.outputs))):
                    builder.PrependInt32(self.outputs[i])
                outputs = builder.EndVector()
        if self.builtinOptions is not None:
            builtinOptions = self.builtinOptions.Pack(builder)
        if self.customOptions is not None:
            if np is not None and type(self.customOptions) is np.ndarray:
                customOptions = builder.CreateNumpyVector(self.customOptions)
            else:
                OperatorStartCustomOptionsVector(builder, len(self.customOptions))
                for i in reversed(range(len(self.customOptions))):
                    builder.PrependUint8(self.customOptions[i])
                customOptions = builder.EndVector()
        if self.mutatingVariableInputs is not None:
            if np is not None and type(self.mutatingVariableInputs) is np.ndarray:
                mutatingVariableInputs = builder.CreateNumpyVector(self.mutatingVariableInputs)
            else:
                OperatorStartMutatingVariableInputsVector(builder, len(self.mutatingVariableInputs))
                for i in reversed(range(len(self.mutatingVariableInputs))):
                    builder.PrependBool(self.mutatingVariableInputs[i])
                mutatingVariableInputs = builder.EndVector()
        if self.intermediates is not None:
            if np is not None and type(self.intermediates) is np.ndarray:
                intermediates = builder.CreateNumpyVector(self.intermediates)
            else:
                OperatorStartIntermediatesVector(builder, len(self.intermediates))
                for i in reversed(range(len(self.intermediates))):
                    builder.PrependInt32(self.intermediates[i])
                intermediates = builder.EndVector()
        if self.builtinOptions2 is not None:
            builtinOptions2 = self.builtinOptions2.Pack(builder)
        # Phase 2: write the table, only adding slots for present fields.
        OperatorStart(builder)
        OperatorAddOpcodeIndex(builder, self.opcodeIndex)
        if self.inputs is not None:
            OperatorAddInputs(builder, inputs)
        if self.outputs is not None:
            OperatorAddOutputs(builder, outputs)
        OperatorAddBuiltinOptionsType(builder, self.builtinOptionsType)
        if self.builtinOptions is not None:
            OperatorAddBuiltinOptions(builder, builtinOptions)
        if self.customOptions is not None:
            OperatorAddCustomOptions(builder, customOptions)
        OperatorAddCustomOptionsFormat(builder, self.customOptionsFormat)
        if self.mutatingVariableInputs is not None:
            OperatorAddMutatingVariableInputs(builder, mutatingVariableInputs)
        if self.intermediates is not None:
            OperatorAddIntermediates(builder, intermediates)
        OperatorAddLargeCustomOptionsOffset(builder, self.largeCustomOptionsOffset)
        OperatorAddLargeCustomOptionsSize(builder, self.largeCustomOptionsSize)
        OperatorAddBuiltinOptions2Type(builder, self.builtinOptions2Type)
        if self.builtinOptions2 is not None:
            OperatorAddBuiltinOptions2(builder, builtinOptions2)
        OperatorAddDebugMetadataIndex(builder, self.debugMetadataIndex)
        operator = OperatorEnd(builder)
        return operator


class SubGraph(object):
    """Read-only accessor for the SubGraph flatbuffer table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubGraph()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSubGraph(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def SubGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # SubGraph
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubGraph
    def Tensors(self, j):
        # slot 0: [Tensor] vector — element j, resolved through its indirect offset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            obj = Tensor()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def TensorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def TensorsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # SubGraph
    def Inputs(self, j):
        # slot 1: [int32] vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def InputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def InputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def InputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # SubGraph
    def Outputs(self, j):
        # slot 2: [int32] vector.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # SubGraph
    def OutputsAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # SubGraph
    def OutputsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def OutputsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0

    # SubGraph
    def Operators(self, j):
        # slot 3: [Operator] vector — element j, via indirect offset.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            obj = Operator()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # SubGraph
    def OperatorsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # SubGraph
    def OperatorsIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        return o == 0

    # SubGraph
    def Name(self):
        # slot 4: optional string.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # SubGraph
    def DebugMetadataIndex(self):
        # slot 5: int32, default -1.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return -1

# Builder helpers for serializing a SubGraph table (6 slots).
def SubGraphStart(builder):
    builder.StartObject(6)

def SubGraphAddTensors(builder, tensors):
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)

def SubGraphStartTensorsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddInputs(builder, inputs):
    builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)

def SubGraphStartInputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddOutputs(builder, outputs):
    builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)

def SubGraphStartOutputsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddOperators(builder, operators):
    builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)

def SubGraphStartOperatorsVector(builder, numElems):
    return builder.StartVector(4, numElems, 4)

def SubGraphAddName(builder, name):
    builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)

def SubGraphAddDebugMetadataIndex(builder, debugMetadataIndex):
    builder.PrependInt32Slot(5, debugMetadataIndex, -1)

def SubGraphEnd(builder):
    return builder.EndObject()


try:
    from typing import List
except:
    pass

class SubGraphT(object):
    """Mutable plain-Python object API mirror of the SubGraph table."""

    # SubGraphT
    def __init__(self):
        self.tensors = None  # type: List[TensorT]
        self.inputs = None  # type: List[int]
        self.outputs = None  # type: List[int]
        self.operators = None  # type: List[OperatorT]
        self.name = None  # type: str
        self.debugMetadataIndex = -1  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        subGraph = SubGraph()
        subGraph.Init(buf, pos)
        return cls.InitFromObj(subGraph)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, subGraph):
        x = SubGraphT()
        x._UnPack(subGraph)
        return x

    # SubGraphT
    def _UnPack(self, subGraph):
        if subGraph is None:
            return
        if not subGraph.TensorsIsNone():
            self.tensors = []
            for i in range(subGraph.TensorsLength()):
                if subGraph.Tensors(i) is None:
                    self.tensors.append(None)
                else:
                    tensor_ = TensorT.InitFromObj(subGraph.Tensors(i))
                    self.tensors.append(tensor_)
        if not subGraph.InputsIsNone():
            if np is None:
                self.inputs = []
                for i in range(subGraph.InputsLength()):
                    self.inputs.append(subGraph.Inputs(i))
            else:
                self.inputs = subGraph.InputsAsNumpy()
        if not subGraph.OutputsIsNone():
            if np is None:
                self.outputs = []
                for i in range(subGraph.OutputsLength()):
                    self.outputs.append(subGraph.Outputs(i))
            else:
                self.outputs = subGraph.OutputsAsNumpy()
        if not subGraph.OperatorsIsNone():
            self.operators = []
            for i in range(subGraph.OperatorsLength()):
                if subGraph.Operators(i) is None:
                    self.operators.append(None)
                else:
                    operator_ = OperatorT.InitFromObj(subGraph.Operators(i))
                    self.operators.append(operator_)
        self.name = subGraph.Name()
        self.debugMetadataIndex = subGraph.DebugMetadataIndex()

    # SubGraphT
    def Pack(self, builder):
        # Serialize nested tables/vectors/strings first, then the SubGraph table.
        if self.tensors is not None:
            tensorslist = []
            for i in range(len(self.tensors)):
                tensorslist.append(self.tensors[i].Pack(builder))
            SubGraphStartTensorsVector(builder, len(self.tensors))
            for i in reversed(range(len(self.tensors))):
                builder.PrependUOffsetTRelative(tensorslist[i])
            tensors = builder.EndVector()
        if self.inputs is not None:
            if np is not None and type(self.inputs) is np.ndarray:
                inputs = builder.CreateNumpyVector(self.inputs)
            else:
                SubGraphStartInputsVector(builder, len(self.inputs))
                for i in reversed(range(len(self.inputs))):
                    builder.PrependInt32(self.inputs[i])
                inputs = builder.EndVector()
        if self.outputs is not None:
            if np is not None and type(self.outputs) is np.ndarray:
                outputs = builder.CreateNumpyVector(self.outputs)
            else:
                SubGraphStartOutputsVector(builder, len(self.outputs))
                for i in reversed(range(len(self.outputs))):
                    builder.PrependInt32(self.outputs[i])
                outputs = builder.EndVector()
        if self.operators is not None:
            operatorslist = []
            for i in range(len(self.operators)):
                operatorslist.append(self.operators[i].Pack(builder))
            SubGraphStartOperatorsVector(builder, len(self.operators))
            for i in reversed(range(len(self.operators))):
                builder.PrependUOffsetTRelative(operatorslist[i])
            operators = builder.EndVector()
        if self.name is not None:
            name = builder.CreateString(self.name)
        SubGraphStart(builder)
        if self.tensors is not None:
            SubGraphAddTensors(builder, tensors)
        if self.inputs is not None:
            SubGraphAddInputs(builder, inputs)
        if self.outputs is not None:
            SubGraphAddOutputs(builder, outputs)
        if self.operators is not None:
            SubGraphAddOperators(builder, operators)
        if self.name is not None:
            SubGraphAddName(builder, name)
        SubGraphAddDebugMetadataIndex(builder, self.debugMetadataIndex)
        subGraph = SubGraphEnd(builder)
        return subGraph


class Buffer(object):
    """Read-only accessor for the Buffer flatbuffer table (continues below this chunk)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = Buffer()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsBuffer(cls, buf, offset=0):
        """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Buffer + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Buffer + def Data(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1)) + return 0 + + # Buffer + def DataAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o) + return 0 + + # Buffer + def DataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Buffer + def DataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # Buffer + def Offset(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos) + return 0 + + # Buffer + def Size(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos) + return 0 + +def BufferStart(builder): + builder.StartObject(3) + +def BufferAddData(builder, data): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0) + +def BufferStartDataVector(builder, numElems): + return builder.StartVector(1, numElems, 1) + +def BufferAddOffset(builder, offset): + builder.PrependUint64Slot(1, offset, 0) + +def BufferAddSize(builder, size): + builder.PrependUint64Slot(2, size, 0) + 
+def BufferEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class BufferT(object): + + # BufferT + def __init__(self): + self.data = None # type: List[int] + self.offset = 0 # type: int + self.size = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + buffer = Buffer() + buffer.Init(buf, pos) + return cls.InitFromObj(buffer) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, buffer): + x = BufferT() + x._UnPack(buffer) + return x + + # BufferT + def _UnPack(self, buffer): + if buffer is None: + return + if not buffer.DataIsNone(): + if np is None: + self.data = [] + for i in range(buffer.DataLength()): + self.data.append(buffer.Data(i)) + else: + self.data = buffer.DataAsNumpy() + self.offset = buffer.Offset() + self.size = buffer.Size() + + # BufferT + def Pack(self, builder): + if self.data is not None: + if np is not None and type(self.data) is np.ndarray: + data = builder.CreateNumpyVector(self.data) + else: + BufferStartDataVector(builder, len(self.data)) + for i in reversed(range(len(self.data))): + builder.PrependUint8(self.data[i]) + data = builder.EndVector() + BufferStart(builder) + if self.data is not None: + BufferAddData(builder, data) + BufferAddOffset(builder, self.offset) + BufferAddSize(builder, self.size) + buffer = BufferEnd(builder) + return buffer + + +class Metadata(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Metadata() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsMetadata(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Metadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Metadata + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Metadata + def Buffer(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def MetadataStart(builder): + builder.StartObject(2) + +def MetadataAddName(builder, name): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) + +def MetadataAddBuffer(builder, buffer): + builder.PrependUint32Slot(1, buffer, 0) + +def MetadataEnd(builder): + return builder.EndObject() + + + +class MetadataT(object): + + # MetadataT + def __init__(self): + self.name = None # type: str + self.buffer = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + metadata = Metadata() + metadata.Init(buf, pos) + return cls.InitFromObj(metadata) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, metadata): + x = MetadataT() + x._UnPack(metadata) + return x + + # MetadataT + def _UnPack(self, metadata): + if metadata is None: + return + self.name = metadata.Name() + self.buffer = metadata.Buffer() + + # MetadataT + def Pack(self, builder): + if self.name is not None: + name = builder.CreateString(self.name) + MetadataStart(builder) + if self.name is not None: + MetadataAddName(builder, name) + MetadataAddBuffer(builder, self.buffer) 
+ metadata = MetadataEnd(builder) + return metadata + + +class TensorMap(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TensorMap() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsTensorMap(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def TensorMapBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TensorMap + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TensorMap + def Name(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # TensorMap + def TensorIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def TensorMapStart(builder): + builder.StartObject(2) + +def TensorMapAddName(builder, name): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0) + +def TensorMapAddTensorIndex(builder, tensorIndex): + builder.PrependUint32Slot(1, tensorIndex, 0) + +def TensorMapEnd(builder): + return builder.EndObject() + + + +class TensorMapT(object): + + # TensorMapT + def __init__(self): + self.name = None # type: str + self.tensorIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + tensorMap = TensorMap() + tensorMap.Init(buf, pos) + return cls.InitFromObj(tensorMap) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, tensorMap): + x = 
TensorMapT() + x._UnPack(tensorMap) + return x + + # TensorMapT + def _UnPack(self, tensorMap): + if tensorMap is None: + return + self.name = tensorMap.Name() + self.tensorIndex = tensorMap.TensorIndex() + + # TensorMapT + def Pack(self, builder): + if self.name is not None: + name = builder.CreateString(self.name) + TensorMapStart(builder) + if self.name is not None: + TensorMapAddName(builder, name) + TensorMapAddTensorIndex(builder, self.tensorIndex) + tensorMap = TensorMapEnd(builder) + return tensorMap + + +class SignatureDef(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SignatureDef() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSignatureDef(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SignatureDef + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SignatureDef + def Inputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = TensorMap() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SignatureDef + def InputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SignatureDef + def InputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SignatureDef + def Outputs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = 
self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = TensorMap() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SignatureDef + def OutputsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SignatureDef + def OutputsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SignatureDef + def SignatureKey(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # SignatureDef + def SubgraphIndex(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def SignatureDefStart(builder): + builder.StartObject(5) + +def SignatureDefAddInputs(builder, inputs): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) + +def SignatureDefStartInputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SignatureDefAddOutputs(builder, outputs): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0) + +def SignatureDefStartOutputsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SignatureDefAddSignatureKey(builder, signatureKey): + builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(signatureKey), 0) + +def SignatureDefAddSubgraphIndex(builder, subgraphIndex): + builder.PrependUint32Slot(4, subgraphIndex, 0) + +def SignatureDefEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class SignatureDefT(object): + + # SignatureDefT + def __init__(self): + self.inputs = None # type: List[TensorMapT] + 
self.outputs = None # type: List[TensorMapT] + self.signatureKey = None # type: str + self.subgraphIndex = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + signatureDef = SignatureDef() + signatureDef.Init(buf, pos) + return cls.InitFromObj(signatureDef) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, signatureDef): + x = SignatureDefT() + x._UnPack(signatureDef) + return x + + # SignatureDefT + def _UnPack(self, signatureDef): + if signatureDef is None: + return + if not signatureDef.InputsIsNone(): + self.inputs = [] + for i in range(signatureDef.InputsLength()): + if signatureDef.Inputs(i) is None: + self.inputs.append(None) + else: + tensorMap_ = TensorMapT.InitFromObj(signatureDef.Inputs(i)) + self.inputs.append(tensorMap_) + if not signatureDef.OutputsIsNone(): + self.outputs = [] + for i in range(signatureDef.OutputsLength()): + if signatureDef.Outputs(i) is None: + self.outputs.append(None) + else: + tensorMap_ = TensorMapT.InitFromObj(signatureDef.Outputs(i)) + self.outputs.append(tensorMap_) + self.signatureKey = signatureDef.SignatureKey() + self.subgraphIndex = signatureDef.SubgraphIndex() + + # SignatureDefT + def Pack(self, builder): + if self.inputs is not None: + inputslist = [] + for i in range(len(self.inputs)): + inputslist.append(self.inputs[i].Pack(builder)) + SignatureDefStartInputsVector(builder, len(self.inputs)) + for i in reversed(range(len(self.inputs))): + builder.PrependUOffsetTRelative(inputslist[i]) + inputs = builder.EndVector() + if self.outputs is not None: + outputslist = [] + for i in range(len(self.outputs)): + outputslist.append(self.outputs[i].Pack(builder)) + SignatureDefStartOutputsVector(builder, len(self.outputs)) + for i in reversed(range(len(self.outputs))): + builder.PrependUOffsetTRelative(outputslist[i]) + outputs = builder.EndVector() + if 
self.signatureKey is not None: + signatureKey = builder.CreateString(self.signatureKey) + SignatureDefStart(builder) + if self.inputs is not None: + SignatureDefAddInputs(builder, inputs) + if self.outputs is not None: + SignatureDefAddOutputs(builder, outputs) + if self.signatureKey is not None: + SignatureDefAddSignatureKey(builder, signatureKey) + SignatureDefAddSubgraphIndex(builder, self.subgraphIndex) + signatureDef = SignatureDefEnd(builder) + return signatureDef + + +class Model(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Model() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsModel(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + @classmethod + def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Model + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Model + def Version(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # Model + def OperatorCodes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = OperatorCode() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def OperatorCodesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def OperatorCodesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # Model + 
def Subgraphs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = SubGraph() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SubgraphsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def SubgraphsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # Model + def Description(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Model + def Buffers(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = Buffer() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def BuffersLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def BuffersIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + + # Model + def MetadataBuffer(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Model + def MetadataBufferAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Model + def MetadataBufferLength(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataBufferIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + return o == 0 + + # Model + def Metadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = Metadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def MetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def MetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + return o == 0 + + # Model + def SignatureDefs(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = SignatureDef() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # Model + def SignatureDefsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Model + def SignatureDefsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + return o == 0 + +def ModelStart(builder): + builder.StartObject(8) + +def ModelAddVersion(builder, version): + builder.PrependUint32Slot(0, version, 0) + +def ModelAddOperatorCodes(builder, operatorCodes): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operatorCodes), 0) + +def ModelStartOperatorCodesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelAddSubgraphs(builder, subgraphs): + 
builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0) + +def ModelStartSubgraphsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelAddDescription(builder, description): + builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(description), 0) + +def ModelAddBuffers(builder, buffers): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(buffers), 0) + +def ModelStartBuffersVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelAddMetadataBuffer(builder, metadataBuffer): + builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(metadataBuffer), 0) + +def ModelStartMetadataBufferVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelAddMetadata(builder, metadata): + builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(metadata), 0) + +def ModelStartMetadataVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelAddSignatureDefs(builder, signatureDefs): + builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(signatureDefs), 0) + +def ModelStartSignatureDefsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ModelEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class ModelT(object): + + # ModelT + def __init__(self): + self.version = 0 # type: int + self.operatorCodes = None # type: List[OperatorCodeT] + self.subgraphs = None # type: List[SubGraphT] + self.description = None # type: str + self.buffers = None # type: List[BufferT] + self.metadataBuffer = None # type: List[int] + self.metadata = None # type: List[MetadataT] + self.signatureDefs = None # type: List[SignatureDefT] + + @classmethod + def InitFromBuf(cls, buf, pos): + model = Model() + model.Init(buf, pos) + 
return cls.InitFromObj(model) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, model): + x = ModelT() + x._UnPack(model) + return x + + # ModelT + def _UnPack(self, model): + if model is None: + return + self.version = model.Version() + if not model.OperatorCodesIsNone(): + self.operatorCodes = [] + for i in range(model.OperatorCodesLength()): + if model.OperatorCodes(i) is None: + self.operatorCodes.append(None) + else: + operatorCode_ = OperatorCodeT.InitFromObj(model.OperatorCodes(i)) + self.operatorCodes.append(operatorCode_) + if not model.SubgraphsIsNone(): + self.subgraphs = [] + for i in range(model.SubgraphsLength()): + if model.Subgraphs(i) is None: + self.subgraphs.append(None) + else: + subGraph_ = SubGraphT.InitFromObj(model.Subgraphs(i)) + self.subgraphs.append(subGraph_) + self.description = model.Description() + if not model.BuffersIsNone(): + self.buffers = [] + for i in range(model.BuffersLength()): + if model.Buffers(i) is None: + self.buffers.append(None) + else: + buffer_ = BufferT.InitFromObj(model.Buffers(i)) + self.buffers.append(buffer_) + if not model.MetadataBufferIsNone(): + if np is None: + self.metadataBuffer = [] + for i in range(model.MetadataBufferLength()): + self.metadataBuffer.append(model.MetadataBuffer(i)) + else: + self.metadataBuffer = model.MetadataBufferAsNumpy() + if not model.MetadataIsNone(): + self.metadata = [] + for i in range(model.MetadataLength()): + if model.Metadata(i) is None: + self.metadata.append(None) + else: + metadata_ = MetadataT.InitFromObj(model.Metadata(i)) + self.metadata.append(metadata_) + if not model.SignatureDefsIsNone(): + self.signatureDefs = [] + for i in range(model.SignatureDefsLength()): + if model.SignatureDefs(i) is None: + self.signatureDefs.append(None) + else: + signatureDef_ = SignatureDefT.InitFromObj(model.SignatureDefs(i)) + 
self.signatureDefs.append(signatureDef_) + + # ModelT + def Pack(self, builder): + if self.operatorCodes is not None: + operatorCodeslist = [] + for i in range(len(self.operatorCodes)): + operatorCodeslist.append(self.operatorCodes[i].Pack(builder)) + ModelStartOperatorCodesVector(builder, len(self.operatorCodes)) + for i in reversed(range(len(self.operatorCodes))): + builder.PrependUOffsetTRelative(operatorCodeslist[i]) + operatorCodes = builder.EndVector() + if self.subgraphs is not None: + subgraphslist = [] + for i in range(len(self.subgraphs)): + subgraphslist.append(self.subgraphs[i].Pack(builder)) + ModelStartSubgraphsVector(builder, len(self.subgraphs)) + for i in reversed(range(len(self.subgraphs))): + builder.PrependUOffsetTRelative(subgraphslist[i]) + subgraphs = builder.EndVector() + if self.description is not None: + description = builder.CreateString(self.description) + if self.buffers is not None: + bufferslist = [] + for i in range(len(self.buffers)): + bufferslist.append(self.buffers[i].Pack(builder)) + ModelStartBuffersVector(builder, len(self.buffers)) + for i in reversed(range(len(self.buffers))): + builder.PrependUOffsetTRelative(bufferslist[i]) + buffers = builder.EndVector() + if self.metadataBuffer is not None: + if np is not None and type(self.metadataBuffer) is np.ndarray: + metadataBuffer = builder.CreateNumpyVector(self.metadataBuffer) + else: + ModelStartMetadataBufferVector(builder, len(self.metadataBuffer)) + for i in reversed(range(len(self.metadataBuffer))): + builder.PrependInt32(self.metadataBuffer[i]) + metadataBuffer = builder.EndVector() + if self.metadata is not None: + metadatalist = [] + for i in range(len(self.metadata)): + metadatalist.append(self.metadata[i].Pack(builder)) + ModelStartMetadataVector(builder, len(self.metadata)) + for i in reversed(range(len(self.metadata))): + builder.PrependUOffsetTRelative(metadatalist[i]) + metadata = builder.EndVector() + if self.signatureDefs is not None: + signatureDefslist = [] + 
for i in range(len(self.signatureDefs)): + signatureDefslist.append(self.signatureDefs[i].Pack(builder)) + ModelStartSignatureDefsVector(builder, len(self.signatureDefs)) + for i in reversed(range(len(self.signatureDefs))): + builder.PrependUOffsetTRelative(signatureDefslist[i]) + signatureDefs = builder.EndVector() + ModelStart(builder) + ModelAddVersion(builder, self.version) + if self.operatorCodes is not None: + ModelAddOperatorCodes(builder, operatorCodes) + if self.subgraphs is not None: + ModelAddSubgraphs(builder, subgraphs) + if self.description is not None: + ModelAddDescription(builder, description) + if self.buffers is not None: + ModelAddBuffers(builder, buffers) + if self.metadataBuffer is not None: + ModelAddMetadataBuffer(builder, metadataBuffer) + if self.metadata is not None: + ModelAddMetadata(builder, metadata) + if self.signatureDefs is not None: + ModelAddSignatureDefs(builder, signatureDefs) + model = ModelEnd(builder) + return model + + diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_keras_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_keras_util.py new file mode 100644 index 0000000000000000000000000000000000000000..4c1f79a1fd5a9ddf7e75a110e60690de47790a53 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_keras_util.py @@ -0,0 +1,229 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keras functions required by TensorFlow Lite. + +The functions defined in this library have been copied over from Keras in order +to remove the dependency from TensorFlow Lite to Keras. The functions which +could not be copied over are accessed using the dependency inversion principle. +(for details, refer to tensorflow/python/util/keras_deps.py). +""" + +import copy + +from tensorflow.python.eager import def_function +from tensorflow.python.framework import tensor_spec +from tensorflow.python.util import keras_deps +from tensorflow.python.util import nest +from tensorflow.python.util.compat import collections_abc + + +def _enforce_names_consistency(specs): + """Enforces that either all specs have names or none do.""" + + def _has_name(spec): + return hasattr(spec, 'name') and spec.name is not None + + def _clear_name(spec): + spec = copy.deepcopy(spec) + if hasattr(spec, 'name'): + spec._name = None # pylint:disable=protected-access + return spec + + flat_specs = nest.flatten(specs) + name_inconsistency = ( + any(_has_name(s) for s in flat_specs) and + not all(_has_name(s) for s in flat_specs)) + + if name_inconsistency: + specs = nest.map_structure(_clear_name, specs) + return specs + + +def get_save_spec(model): + """Returns the save spec of the subclassing keras model.""" + shapes_dict = getattr(model, '_build_shapes_dict', None) + if not shapes_dict: + return None + + if 'input_shape' not in shapes_dict: + raise ValueError( + 'Model {} cannot be saved because the input shapes have not been set.' 
+ ) + + input_shape = shapes_dict['input_shape'] + if isinstance(input_shape, tuple): + shape = input_shape + shape = (None,) + shape[1:] + return tensor_spec.TensorSpec( + shape=shape, dtype=model.input_dtype + ) + elif isinstance(input_shape, dict): + specs = {} + for key, shape in input_shape.items(): + shape = (None,) + shape[1:] + specs[key] = tensor_spec.TensorSpec( + shape=shape, dtype=model.input_dtype, name=key + ) + return specs + elif isinstance(input_shape, list): + specs = [] + for shape in input_shape: + shape = (None,) + shape[1:] + specs.append(tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype)) + return specs + + +def model_input_signature(model, keep_original_batch_size=False): + """Inspect model to get its input signature. + + The model's input signature is a list with a single (possibly-nested) object. + This is due to the Keras-enforced restriction that tensor inputs must be + passed in as the first argument. + + For example, a model with input {'feature1': , 'feature2': } + will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] + + Args: + model: Keras Model object. + keep_original_batch_size: A boolean indicating whether we want to keep using + the original batch size or set it to None. Default is `False`, which means + that the batch dim of the returned input signature will always be set to + `None`. + + Returns: + A list containing either a single TensorSpec or an object with nested + TensorSpecs. This list does not contain the `training` argument. + """ + if hasattr(model, 'save_spec'): + input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size) + if input_specs is None: + return None + # The model's save spec returns (args, kwargs). Extract the first input arg + # to use as the input spec. + # TODO(b/188105669): Add support for multiple tensor arguments. 
+ input_specs = input_specs[0][0] + else: + input_specs = model._get_save_spec( # pylint: disable=protected-access + dynamic_batch=not keep_original_batch_size) + if input_specs is None: + return None + input_specs = _enforce_names_consistency(input_specs) + # Return a list with a single element as the model's input signature. + if isinstance(input_specs, + collections_abc.Sequence) and len(input_specs) == 1: + # Note that the isinstance check filters out single-element dictionaries, + # which should also be wrapped as a single-element list. + return input_specs + else: + return [input_specs] + + +def raise_model_input_error(model): + raise ValueError( + 'Model {} cannot be saved because the input shapes have not been ' + 'set. Usually, input shapes are automatically determined from calling' + ' `.fit()` or `.predict()`. To manually set the shapes, call ' + '`model.build(input_shape)`.'.format(model)) + + +def _create_pseudo_names(tensors, prefix): + """Creates pseudo {input | output} names for subclassed Models. + + Warning: this function should only be used to define default + names for `Metics` and `SavedModel`. No other use cases should + rely on a `Model`'s input or output names. + + Example with dict: + + `{'a': [x1, x2], 'b': x3}` becomes: + `['a_1', 'a_2', 'b']` + + Example with list: + + `[x, y]` becomes: + `['output_1', 'output_2']` + + Args: + tensors: `Model`'s outputs or inputs. + prefix: 'output_' for outputs, 'input_' for inputs. + + Returns: + Flattened list of pseudo names. + """ + + def one_index(ele): + # Start with "output_1" instead of "output_0". + if isinstance(ele, int): + return ele + 1 + return ele + + flat_paths = list(nest.yield_flat_paths(tensors)) + flat_paths = nest.map_structure(one_index, flat_paths) + names = [] + for path in flat_paths: + if not path: + name = prefix + '1' # Single output. 
+ else: + name = '_'.join(str(p) for p in path) + if isinstance(path[0], int): + name = prefix + name + names.append(name) + return names + + +def create_pseudo_output_names(outputs): + """Create pseudo output names for a subclassed Model.""" + return _create_pseudo_names(outputs, prefix='output_') + + +def trace_model_call(model, input_signature=None): + """Trace the model call to create a tf.function for exporting a Keras model. + + Args: + model: A Keras model. + input_signature: optional, a list of tf.TensorSpec objects specifying the + inputs to the model. + + Returns: + A tf.function wrapping the model's call function with input signatures set. + + Raises: + ValueError: if input signature cannot be inferred from the model. + """ + if input_signature is None: + if isinstance(model.call, def_function.Function): + input_signature = model.call.input_signature + + if input_signature is None: + input_signature = model_input_signature(model) + + if input_signature is None: + raise_model_input_error(model) + + @def_function.function(input_signature=input_signature, autograph=False) + def _wrapped_model(*args): + """A concrete tf.function that wraps the model's call function.""" + # When given a single input, Keras models will call the model on the tensor + # rather than a list consisting of the single tensor. 
+ inputs = args[0] if len(input_signature) == 1 else list(args) + + with keras_deps.get_call_context_function()().enter( + model, inputs=inputs, build_graph=False, training=False, saving=True): + outputs = model(inputs, training=False) + + return outputs + + return _wrapped_model diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/toco_flags_pb2.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/toco_flags_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..25a0505e618c8f54fc4c98b34b8f91e1cd72ec17 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/toco_flags_pb2.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/lite/toco/toco_flags.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.compiler.mlir.lite.debug import debug_options_pb2 as tensorflow_dot_compiler_dot_mlir_dot_lite_dot_debug_dot_debug__options__pb2 +from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as tensorflow_dot_compiler_dot_mlir_dot_quantization_dot_stablehlo_dot_quantization__config__pb2 +from tensorflow.compiler.mlir.quantization.stablehlo import quantization_options_pb2 as tensorflow_dot_compiler_dot_mlir_dot_quantization_dot_stablehlo_dot_quantization__options__pb2 +from tensorflow.lite.toco import types_pb2 as tensorflow_dot_lite_dot_toco_dot_types__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%tensorflow/lite/toco/toco_flags.proto\x12\x04toco\x1a\x37tensorflow/compiler/mlir/lite/debug/debug_options.proto\x1aItensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto\x1aJtensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto\x1a tensorflow/lite/toco/types.proto\"\x94\x14\n\tTocoFlags\x12&\n\x0cinput_format\x18\x01 \x01(\x0e\x32\x10.toco.FileFormat\x12\'\n\routput_format\x18\x02 \x01(\x0e\x32\x10.toco.FileFormat\x12.\n\x14inference_input_type\x18\x0b \x01(\x0e\x32\x10.toco.IODataType\x12(\n\x0einference_type\x18\x04 \x01(\x0e\x32\x10.toco.IODataType\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_min\x18\x05 \x01(\x02\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_max\x18\x06 \x01(\x02\x12 \n\x18\x64\x65\x66\x61ult_int16_ranges_min\x18\x0f \x01(\x02\x12 \n\x18\x64\x65\x66\x61ult_int16_ranges_max\x18\x10 \x01(\x02\x12\x17\n\x0f\x64rop_fake_quant\x18\x07 
\x01(\x08\x12!\n\x19reorder_across_fake_quant\x18\x08 \x01(\x08\x12\x18\n\x10\x61llow_custom_ops\x18\n \x01(\x08\x12\x1f\n\x17\x64rop_control_dependency\x18\x0c \x01(\x08\x12+\n#debug_disable_recurrent_cell_fusion\x18\r \x01(\x08\x12%\n\x1dpropagate_fake_quant_num_bits\x18\x0e \x01(\x08\x12\x35\n-allow_nudging_weights_to_use_fast_gemm_kernel\x18\x11 \x01(\x08\x12\'\n\x1b\x64\x65\x64upe_array_min_size_bytes\x18\x12 \x01(\x03:\x02\x36\x34\x12&\n\x18split_tflite_lstm_inputs\x18\x13 \x01(\x08:\x04true\x12\x1f\n\x10quantize_weights\x18\x14 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x11\x64ump_graphviz_dir\x18\x18 \x01(\t\x12#\n\x1b\x64ump_graphviz_include_video\x18\x19 \x01(\x08\x12%\n\x16post_training_quantize\x18\x1a \x01(\x08:\x05\x66\x61lse\x12#\n\x14\x65nable_select_tf_ops\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_select_tf_ops\x18\x1c \x01(\x08:\x05\x66\x61lse\x12\"\n\x13quantize_to_float16\x18\x1d \x01(\x08:\x05\x66\x61lse\x12#\n\x15\x61llow_dynamic_tensors\x18\x1e \x01(\x08:\x04true\x12\x1e\n\x16\x63onversion_summary_dir\x18\x1f \x01(\t\x12\x19\n\rcustom_opdefs\x18 \x03(\tB\x02\x18\x01\x12\x1a\n\x12select_user_tf_ops\x18! \x03(\t\x12.\n enable_tflite_resource_variables\x18\" \x01(\x08:\x04true\x12!\n\x12unfold_batchmatmul\x18# \x01(\x08:\x05\x66\x61lse\x12#\n\x15lower_tensor_list_ops\x18$ \x01(\x08:\x04true\x12+\n\x11\x61\x63\x63umulation_type\x18% \x01(\x0e\x32\x10.toco.IODataType\x12\x1d\n\x0e\x61llow_bfloat16\x18& \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x17\x61llow_all_select_tf_ops\x18\' \x01(\x08\x12*\n\x1bunfold_large_splat_constant\x18( \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x12supported_backends\x18) \x03(\t\x12\x39\n*default_to_single_batch_in_tensor_list_ops\x18* \x01(\x08:\x05\x66\x61lse\x12/\n disable_per_channel_quantization\x18+ \x01(\x08:\x05\x66\x61lse\x12\x32\n#enable_mlir_dynamic_range_quantizer\x18, \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14tf_quantization_mode\x18- \x01(\t\x12)\n\x1a\x64isable_infer_tensor_range\x18. 
\x01(\x08:\x05\x66\x61lse\x12&\n\x17use_fake_quant_num_bits\x18/ \x01(\x08:\x05\x66\x61lse\x12*\n\x1b\x65nable_dynamic_update_slice\x18\x30 \x01(\x08:\x05\x66\x61lse\x12!\n\x12preserve_assert_op\x18\x31 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bguarantee_all_funcs_one_use\x18\x32 \x01(\x08:\x05\x66\x61lse\x12#\n\x14\x63onvert_to_stablehlo\x18\x33 \x01(\x08:\x05\x66\x61lse\x12\x30\n!enable_mlir_variable_quantization\x18\x34 \x01(\x08:\x05\x66\x61lse\x12&\n\x17\x64isable_fuse_mul_and_fc\x18\x35 \x01(\x08:\x05\x66\x61lse\x12M\n\x14quantization_options\x18\x36 \x01(\x0b\x32+.stablehlo.quantization.QuantizationOptionsB\x02\x18\x01\x12.\n\x1b\x65nable_hlo_to_tf_conversion\x18\x37 \x01(\x08:\x05\x66\x61lseB\x02\x18\x01\x12\x39\n\rdebug_options\x18\x38 \x01(\x0b\x32\".tensorflow.converter.DebugOptions\x12 \n\x11use_buffer_offset\x18\x39 \x01(\x08:\x05\x66\x61lse\x12.\n\x1flegalize_custom_tensor_list_ops\x18: \x01(\x08:\x05\x66\x61lse\x12$\n\x15reduce_type_precision\x18; \x01(\x08:\x05\x66\x61lse\x12!\n\x13qdq_conversion_mode\x18< \x01(\t:\x04NONE\x12G\n\x13quantization_config\x18= \x01(\x0b\x32*.stablehlo.quantization.QuantizationConfig\x12@\n1disable_per_channel_quantization_for_dense_layers\x18> \x01(\x08:\x05\x66\x61lse\x12/\n enable_composite_direct_lowering\x18? 
\x01(\x08:\x05\x66\x61lse\x12K\n\x16model_origin_framework\x18@ \x01(\x0e\x32$.toco.TocoFlags.ModelOriginFramework:\x05UNSET\x12\x32\n#canonicalizing_inf_as_min_max_float\x18\x41 \x01(\x08:\x05\x66\x61lse\"R\n\x14ModelOriginFramework\x12\t\n\x05UNSET\x10\x00\x12\x0e\n\nTENSORFLOW\x10\x01\x12\t\n\x05KERAS\x10\x02\x12\x07\n\x03JAX\x10\x03\x12\x0b\n\x07PYTORCH\x10\x04*\\\n\nFileFormat\x12\x17\n\x13\x46ILE_FORMAT_UNKNOWN\x10\x00\x12\x17\n\x13TENSORFLOW_GRAPHDEF\x10\x01\x12\n\n\x06TFLITE\x10\x02\x12\x10\n\x0cGRAPHVIZ_DOT\x10\x03') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.lite.toco.toco_flags_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _TOCOFLAGS.fields_by_name['custom_opdefs']._options = None + _TOCOFLAGS.fields_by_name['custom_opdefs']._serialized_options = b'\030\001' + _TOCOFLAGS.fields_by_name['quantization_options']._options = None + _TOCOFLAGS.fields_by_name['quantization_options']._serialized_options = b'\030\001' + _TOCOFLAGS.fields_by_name['enable_hlo_to_tf_conversion']._options = None + _TOCOFLAGS.fields_by_name['enable_hlo_to_tf_conversion']._serialized_options = b'\030\001' + _FILEFORMAT._serialized_start=2872 + _FILEFORMAT._serialized_end=2964 + _TOCOFLAGS._serialized_start=290 + _TOCOFLAGS._serialized_end=2870 + _TOCOFLAGS_MODELORIGINFRAMEWORK._serialized_start=2788 + _TOCOFLAGS_MODELORIGINFRAMEWORK._serialized_end=2870 +# @@protoc_insertion_point(module_scope) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/types_pb2.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/types_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..897496121719af0e5e2b2dc59a940e602cd53595 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/toco/types_pb2.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 
-*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorflow/lite/toco/types.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n tensorflow/lite/toco/types.proto\x12\x04toco*\xb3\x02\n\nIODataType\x12\x18\n\x14IO_DATA_TYPE_UNKNOWN\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\x13\n\x0fQUANTIZED_UINT8\x10\x02\x12\t\n\x05INT32\x10\x03\x12\t\n\x05INT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x13\n\x0fQUANTIZED_INT16\x10\x06\x12\x08\n\x04\x42OOL\x10\x07\x12\r\n\tCOMPLEX64\x10\x08\x12\x12\n\x0eQUANTIZED_INT8\x10\t\x12\x0b\n\x07\x46LOAT16\x10\n\x12\x0b\n\x07\x46LOAT64\x10\x0b\x12\x0e\n\nCOMPLEX128\x10\x0c\x12\n\n\x06UINT64\x10\r\x12\x0c\n\x08RESOURCE\x10\x0e\x12\x0b\n\x07VARIANT\x10\x0f\x12\n\n\x06UINT32\x10\x10\x12\t\n\x05UINT8\x10\x11\x12\x08\n\x04INT8\x10\x12\x12\t\n\x05INT16\x10\x13\x12\n\n\x06UINT16\x10\x14') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.lite.toco.types_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _IODATATYPE._serialized_start=43 + _IODATATYPE._serialized_end=350 +# @@protoc_insertion_point(module_scope)