diff --git a/.gitattributes b/.gitattributes
index 0c7cf931b55de1929b0aaeb8ef192799a588f71a..0f3eb782591b5e94b7ea3c15b3425513877e36cb 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -197,3 +197,4 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/gr
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.so filter=lfs diff=lfs merge=lfs -text
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text
SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text
+SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa9c24592d6e6f5f7d1845d3dc55bfbbd13d0116
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67cd0726ffc7892f2851b8dcd31e3388022a280d
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e04a9009cebf0eab8c8b33da02a1b5d75878676a
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25b8f4aaeac0114e1bf2b2c40c4145db0d7a5994
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bacbfaa95c07566f16f391ffd0bfdb75c3ea697
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..beb3f4ce5316f83453d1276033c621931c98b4bd
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py
@@ -0,0 +1,763 @@
+"""Python wrappers around TensorFlow ops.
+
+This file is MACHINE GENERATED! Do not edit.
+"""
+
+import collections
+
+from tensorflow.python import pywrap_tfe as pywrap_tfe
+from tensorflow.python.eager import context as _context
+from tensorflow.python.eager import core as _core
+from tensorflow.python.eager import execute as _execute
+from tensorflow.python.framework import dtypes as _dtypes
+from tensorflow.security.fuzzing.py import annotation_types as _atypes
+
+from tensorflow.python.framework import op_def_registry as _op_def_registry
+from tensorflow.python.framework import ops as _ops
+from tensorflow.python.framework import op_def_library as _op_def_library
+from tensorflow.python.util.deprecation import deprecated_endpoints
+from tensorflow.python.util import dispatch as _dispatch
+from tensorflow.python.util.tf_export import tf_export
+
+from typing import TypeVar, List, Any
+from typing_extensions import Annotated
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('delete_rpc_future_resource')
+def delete_rpc_future_resource(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None):
+ r"""TODO: add doc.
+
+ Args:
+ handle: A `Tensor` of type `resource`.
+ deleter: A `Tensor` of type `variant`.
+ name: A name for the operation (optional).
+
+ Returns:
+ The created Operation.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "DeleteRpcFutureResource", name, handle, deleter)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_delete_rpc_future_resource(
+ (handle, deleter, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return delete_rpc_future_resource_eager_fallback(
+ handle, deleter, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ delete_rpc_future_resource, (), dict(handle=handle,
+ deleter=deleter, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_delete_rpc_future_resource(
+ (handle, deleter, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "DeleteRpcFutureResource", handle=handle, deleter=deleter, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ delete_rpc_future_resource, (), dict(handle=handle, deleter=deleter,
+ name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ return _op
+DeleteRpcFutureResource = tf_export("raw_ops.DeleteRpcFutureResource")(_ops.to_raw_op(delete_rpc_future_resource))
+_dispatcher_for_delete_rpc_future_resource = delete_rpc_future_resource._tf_type_based_dispatcher.Dispatch
+
+
+def delete_rpc_future_resource_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx):
+ handle = _ops.convert_to_tensor(handle, _dtypes.resource)
+ deleter = _ops.convert_to_tensor(deleter, _dtypes.variant)
+ _inputs_flat = [handle, deleter]
+ _attrs = None
+ _result = _execute.execute(b"DeleteRpcFutureResource", 0,
+ inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
+ name=name)
+ _result = None
+ return _result
+
+_RpcCallOutput = collections.namedtuple(
+ "RpcCall",
+ ["future", "deleter"])
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_call')
+def rpc_call(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name=None):
+ r"""TODO: add doc.
+
+ Args:
+ client: A `Tensor` of type `resource`.
+ method_name: A `Tensor` of type `string`.
+ args: A list of `Tensor` objects.
+ timeout_in_ms: A `Tensor` of type `int64`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A tuple of `Tensor` objects (future, deleter).
+
+ future: A `Tensor` of type `resource`.
+ deleter: A `Tensor` of type `variant`.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcCall", name, client, method_name, args, timeout_in_ms)
+ _result = _RpcCallOutput._make(_result)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_call(
+ (client, method_name, args, timeout_in_ms, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_call_eager_fallback(
+ client, method_name, args, timeout_in_ms, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_call, (), dict(client=client, method_name=method_name,
+ args=args, timeout_in_ms=timeout_in_ms,
+ name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_call(
+ (client, method_name, args, timeout_in_ms, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcCall", client=client, method_name=method_name, args=args,
+ timeout_in_ms=timeout_in_ms, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_call, (), dict(client=client, method_name=method_name,
+ args=args, timeout_in_ms=timeout_in_ms,
+ name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ _result = _outputs[:]
+ if _execute.must_record_gradient():
+ _attrs = ("Tin", _op.get_attr("Tin"))
+ _inputs_flat = _op.inputs
+ _execute.record_gradient(
+ "RpcCall", _inputs_flat, _attrs, _result)
+ _result = _RpcCallOutput._make(_result)
+ return _result
+
+RpcCall = tf_export("raw_ops.RpcCall")(_ops.to_raw_op(rpc_call))
+_dispatcher_for_rpc_call = rpc_call._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_call_eager_fallback(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name, ctx):
+ _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx)
+ client = _ops.convert_to_tensor(client, _dtypes.resource)
+ method_name = _ops.convert_to_tensor(method_name, _dtypes.string)
+ timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64)
+ _inputs_flat = [client, method_name] + list(args) + [timeout_in_ms]
+ _attrs = ("Tin", _attr_Tin)
+ _result = _execute.execute(b"RpcCall", 2, inputs=_inputs_flat, attrs=_attrs,
+ ctx=ctx, name=name)
+ if _execute.must_record_gradient():
+ _execute.record_gradient(
+ "RpcCall", _inputs_flat, _attrs, _result)
+ _result = _RpcCallOutput._make(_result)
+ return _result
+
+_RpcCheckStatusOutput = collections.namedtuple(
+ "RpcCheckStatus",
+ ["error_code", "error"])
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_check_status')
+def rpc_check_status(status_or: Annotated[Any, _atypes.Resource], name=None):
+ r"""TODO: add doc.
+
+ Args:
+ status_or: A `Tensor` of type `resource`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A tuple of `Tensor` objects (error_code, error).
+
+ error_code: A `Tensor` of type `int64`.
+ error: A `Tensor` of type `string`.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcCheckStatus", name, status_or)
+ _result = _RpcCheckStatusOutput._make(_result)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_check_status(
+ (status_or, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_check_status_eager_fallback(
+ status_or, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_check_status, (), dict(status_or=status_or, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_check_status(
+ (status_or, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcCheckStatus", status_or=status_or, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_check_status, (), dict(status_or=status_or, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ _result = _outputs[:]
+ if _execute.must_record_gradient():
+ _attrs = ()
+ _inputs_flat = _op.inputs
+ _execute.record_gradient(
+ "RpcCheckStatus", _inputs_flat, _attrs, _result)
+ _result = _RpcCheckStatusOutput._make(_result)
+ return _result
+
+RpcCheckStatus = tf_export("raw_ops.RpcCheckStatus")(_ops.to_raw_op(rpc_check_status))
+_dispatcher_for_rpc_check_status = rpc_check_status._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_check_status_eager_fallback(status_or: Annotated[Any, _atypes.Resource], name, ctx):
+ status_or = _ops.convert_to_tensor(status_or, _dtypes.resource)
+ _inputs_flat = [status_or]
+ _attrs = None
+ _result = _execute.execute(b"RpcCheckStatus", 2, inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ if _execute.must_record_gradient():
+ _execute.record_gradient(
+ "RpcCheckStatus", _inputs_flat, _attrs, _result)
+ _result = _RpcCheckStatusOutput._make(_result)
+ return _result
+
+_RpcClientOutput = collections.namedtuple(
+ "RpcClient",
+ ["client", "method_specs"])
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_client')
+def rpc_client(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name:str="", list_registered_methods:bool=False, name=None):
+ r"""TODO: add doc.
+
+ Args:
+ server_address: A `Tensor` of type `string`.
+ timeout_in_ms: A `Tensor` of type `int64`.
+ shared_name: An optional `string`. Defaults to `""`.
+ list_registered_methods: An optional `bool`. Defaults to `False`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A tuple of `Tensor` objects (client, method_specs).
+
+ client: A `Tensor` of type `resource`.
+ method_specs: A `Tensor` of type `string`.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcClient", name, server_address, timeout_in_ms, "shared_name",
+ shared_name, "list_registered_methods", list_registered_methods)
+ _result = _RpcClientOutput._make(_result)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_client(
+ (server_address, timeout_in_ms, shared_name,
+ list_registered_methods, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_client_eager_fallback(
+ server_address, timeout_in_ms, shared_name=shared_name,
+ list_registered_methods=list_registered_methods, name=name,
+ ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_client, (), dict(server_address=server_address,
+ timeout_in_ms=timeout_in_ms,
+ shared_name=shared_name,
+ list_registered_methods=list_registered_methods,
+ name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_client(
+ (server_address, timeout_in_ms, shared_name, list_registered_methods,
+ name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ if shared_name is None:
+ shared_name = ""
+ shared_name = _execute.make_str(shared_name, "shared_name")
+ if list_registered_methods is None:
+ list_registered_methods = False
+ list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods")
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcClient", server_address=server_address,
+ timeout_in_ms=timeout_in_ms, shared_name=shared_name,
+ list_registered_methods=list_registered_methods,
+ name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_client, (), dict(server_address=server_address,
+ timeout_in_ms=timeout_in_ms,
+ shared_name=shared_name,
+ list_registered_methods=list_registered_methods,
+ name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ _result = _outputs[:]
+ if _execute.must_record_gradient():
+ _attrs = ("shared_name", _op.get_attr("shared_name"),
+ "list_registered_methods",
+ _op._get_attr_bool("list_registered_methods"))
+ _inputs_flat = _op.inputs
+ _execute.record_gradient(
+ "RpcClient", _inputs_flat, _attrs, _result)
+ _result = _RpcClientOutput._make(_result)
+ return _result
+
+RpcClient = tf_export("raw_ops.RpcClient")(_ops.to_raw_op(rpc_client))
+_dispatcher_for_rpc_client = rpc_client._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_client_eager_fallback(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name: str, list_registered_methods: bool, name, ctx):
+ if shared_name is None:
+ shared_name = ""
+ shared_name = _execute.make_str(shared_name, "shared_name")
+ if list_registered_methods is None:
+ list_registered_methods = False
+ list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods")
+ server_address = _ops.convert_to_tensor(server_address, _dtypes.string)
+ timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64)
+ _inputs_flat = [server_address, timeout_in_ms]
+ _attrs = ("shared_name", shared_name, "list_registered_methods",
+ list_registered_methods)
+ _result = _execute.execute(b"RpcClient", 2, inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ if _execute.must_record_gradient():
+ _execute.record_gradient(
+ "RpcClient", _inputs_flat, _attrs, _result)
+ _result = _RpcClientOutput._make(_result)
+ return _result
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_get_value')
+def rpc_get_value(status_or: Annotated[Any, _atypes.Resource], Tout, name=None):
+ r"""TODO: add doc.
+
+ Args:
+ status_or: A `Tensor` of type `resource`.
+ Tout: A list of `tf.DTypes`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A list of `Tensor` objects of type `Tout`.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcGetValue", name, status_or, "Tout", Tout)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_get_value(
+ (status_or, Tout, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_get_value_eager_fallback(
+ status_or, Tout=Tout, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_get_value(
+ (status_or, Tout, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ if not isinstance(Tout, (list, tuple)):
+ raise TypeError(
+ "Expected list for 'Tout' argument to "
+ "'rpc_get_value' Op, not %r." % Tout)
+ Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcGetValue", status_or=status_or, Tout=Tout, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ _result = _outputs[:]
+ if not _result:
+ return _op
+ if _execute.must_record_gradient():
+ _attrs = ("Tout", _op.get_attr("Tout"))
+ _inputs_flat = _op.inputs
+ _execute.record_gradient(
+ "RpcGetValue", _inputs_flat, _attrs, _result)
+ return _result
+
+RpcGetValue = tf_export("raw_ops.RpcGetValue")(_ops.to_raw_op(rpc_get_value))
+_dispatcher_for_rpc_get_value = rpc_get_value._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_get_value_eager_fallback(status_or: Annotated[Any, _atypes.Resource], Tout, name, ctx):
+ if not isinstance(Tout, (list, tuple)):
+ raise TypeError(
+ "Expected list for 'Tout' argument to "
+ "'rpc_get_value' Op, not %r." % Tout)
+ Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
+ status_or = _ops.convert_to_tensor(status_or, _dtypes.resource)
+ _inputs_flat = [status_or]
+ _attrs = ("Tout", Tout)
+ _result = _execute.execute(b"RpcGetValue", len(Tout), inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ if _execute.must_record_gradient():
+ _execute.record_gradient(
+ "RpcGetValue", _inputs_flat, _attrs, _result)
+ return _result
+
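+# Illustrative only (not part of the generated output): the client-side
+# wrappers above compose as follows, assuming `import tensorflow as tf` and a
+# server already listening at `address`. Kept as a comment so the module stays
+# free of executable examples.
+#
+#   client, _ = rpc_client(server_address=address, timeout_in_ms=5000)
+#   future, deleter = rpc_call(client, method_name="add",
+#                              args=[tf.constant(2), tf.constant(40)],
+#                              timeout_in_ms=5000)
+#   error_code, _ = rpc_check_status(future)
+#   if error_code == 0:  # OK status
+#     result, = rpc_get_value(future, Tout=[tf.int32])
+#   delete_rpc_future_resource(future, deleter)
+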
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_server')
+def rpc_server(server_address: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Resource]:
+ r"""TODO: add doc.
+
+ Args:
+ server_address: A `Tensor` of type `string`.
+ name: A name for the operation (optional).
+
+ Returns:
+ A `Tensor` of type `resource`.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcServer", name, server_address)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_server(
+ (server_address, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_server_eager_fallback(
+ server_address, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server, (), dict(server_address=server_address, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_server(
+ (server_address, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcServer", server_address=server_address, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server, (), dict(server_address=server_address, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ _result = _outputs[:]
+ if _execute.must_record_gradient():
+ _attrs = ()
+ _inputs_flat = _op.inputs
+ _execute.record_gradient(
+ "RpcServer", _inputs_flat, _attrs, _result)
+ _result, = _result
+ return _result
+
+RpcServer = tf_export("raw_ops.RpcServer")(_ops.to_raw_op(rpc_server))
+_dispatcher_for_rpc_server = rpc_server._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_server_eager_fallback(server_address: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Resource]:
+ server_address = _ops.convert_to_tensor(server_address, _dtypes.string)
+ _inputs_flat = [server_address]
+ _attrs = None
+ _result = _execute.execute(b"RpcServer", 1, inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ if _execute.must_record_gradient():
+ _execute.record_gradient(
+ "RpcServer", _inputs_flat, _attrs, _result)
+ _result, = _result
+ return _result
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_server_register')
+def rpc_server_register(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs:str="", name=None):
+ r"""TODO: add doc.
+
+ Args:
+ server: A `Tensor` of type `resource`.
+ method_name: A `Tensor` of type `string`.
+ captured_inputs: A list of `Tensor` objects.
+ f: A function decorated with @Defun.
+ output_specs: A `string`.
+ input_specs: An optional `string`. Defaults to `""`.
+ name: A name for the operation (optional).
+
+ Returns:
+ The created Operation.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcServerRegister", name, server, method_name, captured_inputs,
+ "f", f, "input_specs", input_specs, "output_specs", output_specs)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_server_register(
+ (server, method_name, captured_inputs, f, output_specs, input_specs,
+ name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_server_register_eager_fallback(
+ server, method_name, captured_inputs, f=f, input_specs=input_specs,
+ output_specs=output_specs, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server_register, (), dict(server=server,
+ method_name=method_name,
+ captured_inputs=captured_inputs,
+ f=f, output_specs=output_specs,
+ input_specs=input_specs, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_server_register(
+ (server, method_name, captured_inputs, f, output_specs, input_specs,
+ name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ output_specs = _execute.make_str(output_specs, "output_specs")
+ if input_specs is None:
+ input_specs = ""
+ input_specs = _execute.make_str(input_specs, "input_specs")
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcServerRegister", server=server, method_name=method_name,
+ captured_inputs=captured_inputs, f=f,
+ output_specs=output_specs,
+ input_specs=input_specs, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server_register, (), dict(server=server,
+ method_name=method_name,
+ captured_inputs=captured_inputs, f=f,
+ output_specs=output_specs,
+ input_specs=input_specs, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ return _op
+RpcServerRegister = tf_export("raw_ops.RpcServerRegister")(_ops.to_raw_op(rpc_server_register))
+_dispatcher_for_rpc_server_register = rpc_server_register._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_server_register_eager_fallback(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs: str, name, ctx):
+ output_specs = _execute.make_str(output_specs, "output_specs")
+ if input_specs is None:
+ input_specs = ""
+ input_specs = _execute.make_str(input_specs, "input_specs")
+ _attr_Tin, captured_inputs = _execute.convert_to_mixed_eager_tensors(captured_inputs, ctx)
+ server = _ops.convert_to_tensor(server, _dtypes.resource)
+ method_name = _ops.convert_to_tensor(method_name, _dtypes.string)
+ _inputs_flat = [server, method_name] + list(captured_inputs)
+ _attrs = ("Tin", _attr_Tin, "f", f, "input_specs", input_specs,
+ "output_specs", output_specs)
+ _result = _execute.execute(b"RpcServerRegister", 0, inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ _result = None
+ return _result
+
+
+@_dispatch.add_fallback_dispatch_list
+@_dispatch.add_type_based_api_dispatcher
+@tf_export('rpc_server_start')
+def rpc_server_start(server: Annotated[Any, _atypes.Resource], name=None):
+ r"""TODO: add doc.
+
+ Args:
+ server: A `Tensor` of type `resource`.
+ name: A name for the operation (optional).
+
+ Returns:
+ The created Operation.
+ """
+ _ctx = _context._context or _context.context()
+ tld = _ctx._thread_local_data
+ if tld.is_eager:
+ try:
+ _result = pywrap_tfe.TFE_Py_FastPathExecute(
+ _ctx, "RpcServerStart", name, server)
+ return _result
+ except _core._NotOkStatusException as e:
+ _ops.raise_from_not_ok_status(e, name)
+ except _core._FallbackException:
+ pass
+ try:
+ _result = _dispatcher_for_rpc_server_start(
+ (server, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ return rpc_server_start_eager_fallback(
+ server, name=name, ctx=_ctx)
+ except _core._SymbolicException:
+ pass # Add nodes to the TensorFlow graph.
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server_start, (), dict(server=server, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ else:
+ _result = _dispatcher_for_rpc_server_start(
+ (server, name,), None)
+ if _result is not NotImplemented:
+ return _result
+ # Add nodes to the TensorFlow graph.
+ try:
+ _, _, _op, _outputs = _op_def_library._apply_op_helper(
+ "RpcServerStart", server=server, name=name)
+ except (TypeError, ValueError):
+ _result = _dispatch.dispatch(
+ rpc_server_start, (), dict(server=server, name=name)
+ )
+ if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
+ return _result
+ raise
+ return _op
+RpcServerStart = tf_export("raw_ops.RpcServerStart")(_ops.to_raw_op(rpc_server_start))
+_dispatcher_for_rpc_server_start = rpc_server_start._tf_type_based_dispatcher.Dispatch
+
+
+def rpc_server_start_eager_fallback(server: Annotated[Any, _atypes.Resource], name, ctx):
+ server = _ops.convert_to_tensor(server, _dtypes.resource)
+ _inputs_flat = [server]
+ _attrs = None
+ _result = _execute.execute(b"RpcServerStart", 0, inputs=_inputs_flat,
+ attrs=_attrs, ctx=ctx, name=name)
+ _result = None
+ return _result
+
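+# Illustrative only (not part of the generated output): the public
+# tf.distribute.experimental.rpc API is the intended entry point to the ops
+# above. A minimal sketch, assuming a TensorFlow build with these RPC kernels;
+# the address is a placeholder. Guarded so importing this module has no side
+# effects.
+if __name__ == "__main__":
+  import tensorflow as tf
+
+  @tf.function(input_signature=[tf.TensorSpec([], tf.int32),
+                                tf.TensorSpec([], tf.int32)])
+  def _add(a, b):
+    return a + b
+
+  _address = "localhost:50051"
+  _server = tf.distribute.experimental.rpc.Server.create("grpc", _address)
+  _server.register("add", _add)   # backed by RpcServerRegister
+  _server.start()                 # backed by RpcServerStart
+
+  _client = tf.distribute.experimental.rpc.Client.create("grpc", _address)
+  _future = _client.call("add", args=[tf.constant(2), tf.constant(40)],
+                         output_specs=tf.TensorSpec([], tf.int32),
+                         timeout_in_ms=5000)
+  if _future.is_ok():             # backed by RpcCheckStatus
+    print(_future.get_value())    # backed by RpcGetValue -> 42
+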
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25ba7eee4766e3bcad03a8f8d47b20e961a8de59
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6ad43b0dfbcf991d2ae995b4056e94d701d6e06
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28ea08746738f2278c077c5d8754a0d1e33b278b
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..39fa8850a62a590e1f1fd5d521d99462ec88a6ce
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: tensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2
+from tensorflow.core.protobuf import struct_pb2 as tensorflow_dot_core_dot_protobuf_dot_struct__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nAtensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto\x12\x0etensorflow.rpc\x1a&tensorflow/core/framework/tensor.proto\x1a%tensorflow/core/protobuf/struct.proto\"M\n\x0b\x43\x61llRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12.\n\rinput_tensors\x18\x02 \x03(\x0b\x32\x17.tensorflow.TensorProto\"?\n\x0c\x43\x61llResponse\x12/\n\x0eoutput_tensors\x18\x01 \x03(\x0b\x32\x17.tensorflow.TensorProto\"\r\n\x0bListRequest\"\x87\x01\n\x10RegisteredMethod\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x30\n\x0binput_specs\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\x12\x31\n\x0coutput_specs\x18\x03 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\"L\n\x0cListResponse\x12<\n\x12registered_methods\x18\x01 \x03(\x0b\x32 .tensorflow.rpc.RegisteredMethod2\x96\x01\n\nRpcService\x12\x43\n\x04\x43\x61ll\x12\x1b.tensorflow.rpc.CallRequest\x1a\x1c.tensorflow.rpc.CallResponse\"\x00\x12\x43\n\x04List\x12\x1b.tensorflow.rpc.ListRequest\x1a\x1c.tensorflow.rpc.ListResponse\"\x00\x62\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.distribute.experimental.rpc.proto.tf_rpc_service_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+ DESCRIPTOR._options = None
+ _CALLREQUEST._serialized_start=164
+ _CALLREQUEST._serialized_end=241
+ _CALLRESPONSE._serialized_start=243
+ _CALLRESPONSE._serialized_end=306
+ _LISTREQUEST._serialized_start=308
+ _LISTREQUEST._serialized_end=321
+ _REGISTEREDMETHOD._serialized_start=324
+ _REGISTEREDMETHOD._serialized_end=459
+ _LISTRESPONSE._serialized_start=461
+ _LISTRESPONSE._serialized_end=537
+ _RPCSERVICE._serialized_start=540
+ _RPCSERVICE._serialized_end=690
+# @@protoc_insertion_point(module_scope)
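+
+# Illustrative only (not part of the generated output): the messages defined
+# above use the standard protobuf API. A minimal sketch; dtype 3 is DT_INT32
+# in TensorFlow's DataType enum.
+if __name__ == "__main__":
+  request = CallRequest(method="add")
+  request.input_tensors.add(dtype=3, int_val=[2, 40])
+  print(CallRequest.FromString(request.SerializeToString()).method)  # "add"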
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe854a3a079e616b21c1effaedfbfa409857573d
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py
@@ -0,0 +1,63 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+from tensorflow.distribute.experimental.rpc.proto import tf_rpc_service_pb2 as tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2
+
+
+class RpcServiceStub(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.Call = channel.unary_unary(
+ '/tensorflow.rpc.RpcService/Call',
+ request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.SerializeToString,
+ response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.FromString,
+ )
+ self.List = channel.unary_unary(
+ '/tensorflow.rpc.RpcService/List',
+ request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.SerializeToString,
+ response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.FromString,
+ )
+
+
+class RpcServiceServicer(object):
+ # missing associated documentation comment in .proto file
+ pass
+
+ def Call(self, request, context):
+ """RPC for invoking a registered function on remote server.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def List(self, request, context):
+ """RPC for listing available methods in a server.
+ """
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_RpcServiceServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'Call': grpc.unary_unary_rpc_method_handler(
+ servicer.Call,
+ request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.FromString,
+ response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.SerializeToString,
+ ),
+ 'List': grpc.unary_unary_rpc_method_handler(
+ servicer.List,
+ request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.FromString,
+ response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'tensorflow.rpc.RpcService', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
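+
+
+# Illustrative only (not part of the generated output): minimal wiring for
+# this service with the grpcio runtime; the port is a placeholder.
+if __name__ == "__main__":
+  from concurrent import futures
+
+  class _Service(RpcServiceServicer):
+
+    def List(self, request, context):
+      return tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse()
+
+  server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
+  add_RpcServiceServicer_to_server(_Service(), server)
+  server.add_insecure_port("[::]:50051")
+  server.start()
+
+  channel = grpc.insecure_channel("localhost:50051")
+  stub = RpcServiceStub(channel)
+  stub.List(tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest())
+  server.stop(None)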
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b04d0b786602831f83e9260dae1265f447b7d8ba
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py
new file mode 100644
index 0000000000000000000000000000000000000000..110d8014786b686f0c4f4051233712671d8d0c1d
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py
@@ -0,0 +1,107 @@
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""This tool analyzes a TensorFlow Lite graph."""
+
+import os
+
+# pylint: disable=g-import-not-at-top
+if not os.path.splitext(__file__)[0].endswith(
+ os.path.join("tflite_runtime", "analyzer")):
+ # This file is part of tensorflow package.
+ from tensorflow.compiler.mlir.lite.python import wrap_converter
+ from tensorflow.lite.python.analyzer_wrapper import _pywrap_analyzer_wrapper as _analyzer_wrapper
+ from tensorflow.python.util.tf_export import tf_export as _tf_export
+else:
+ # This file is part of tflite_runtime package.
+ from tflite_runtime import _pywrap_analyzer_wrapper as _analyzer_wrapper
+
+ def _tf_export(*x, **kwargs):
+ del x, kwargs
+ return lambda x: x
+
+
+@_tf_export("lite.experimental.Analyzer")
+class ModelAnalyzer():
+ """Provides a collection of TFLite model analyzer tools.
+
+ Example:
+
+ ```python
+ model = tf.keras.applications.MobileNetV3Large()
+  fb_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()
+ tf.lite.experimental.Analyzer.analyze(model_content=fb_model)
+ # === TFLite ModelAnalyzer ===
+ #
+  # Your TFLite model has '1' subgraph(s). In the subgraph description below,
+ # T# represents the Tensor numbers. For example, in Subgraph#0, the MUL op
+ # takes tensor #0 and tensor #19 as input and produces tensor #136 as output.
+ #
+ # Subgraph#0 main(T#0) -> [T#263]
+ # Op#0 MUL(T#0, T#19) -> [T#136]
+ # Op#1 ADD(T#136, T#18) -> [T#137]
+ # Op#2 CONV_2D(T#137, T#44, T#93) -> [T#138]
+ # Op#3 HARD_SWISH(T#138) -> [T#139]
+ # Op#4 DEPTHWISE_CONV_2D(T#139, T#94, T#24) -> [T#140]
+ # ...
+ ```
+
+ WARNING: Experimental interface, subject to change.
+ """
+
+ @staticmethod
+ def analyze(model_path=None,
+ model_content=None,
+ gpu_compatibility=False,
+ **kwargs):
+ """Analyzes the given tflite_model with dumping model structure.
+
+    This tool helps users understand a TFLite flatbuffer model by dumping its
+    internal graph structure. It also provides additional checks such as GPU
+    delegate compatibility.
+
+ WARNING: Experimental interface, subject to change.
+    The output format is not guaranteed to stay stable, so don't write
+    scripts against it.
+
+ Args:
+ model_path: TFLite flatbuffer model path.
+ model_content: TFLite flatbuffer model object.
+ gpu_compatibility: Whether to check GPU delegate compatibility.
+ **kwargs: Experimental keyword arguments to analyze API.
+
+ Returns:
+      None. The analysis report is printed to the console.
+ """
+ if not model_path and not model_content:
+ raise ValueError("neither `model_path` nor `model_content` is provided")
+ if model_path:
+ print(f"=== {model_path} ===\n")
+ tflite_model = model_path
+ input_is_filepath = True
+ else:
+ print("=== TFLite ModelAnalyzer ===\n")
+ tflite_model = model_content
+ input_is_filepath = False
+
+ if kwargs.get("experimental_use_mlir", False):
+ print(
+ wrap_converter.wrapped_flat_buffer_file_to_mlir(
+ tflite_model, input_is_filepath
+ )
+ )
+ else:
+ print(
+ _analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath,
+ gpu_compatibility))
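+
+
+# Illustrative only (not part of the generated output): typical invocations,
+# assuming a flatbuffer model exists at the placeholder path.
+if __name__ == "__main__":
+  ModelAnalyzer.analyze(model_path="/tmp/model.tflite")
+  ModelAnalyzer.analyze(model_path="/tmp/model.tflite", gpu_compatibility=True)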
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py
new file mode 100644
index 0000000000000000000000000000000000000000..85a460abc866dd51239e550cd96b598569af78c5
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py
@@ -0,0 +1,568 @@
+import flatbuffers
+
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tflite
+
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class ModelType(object):
+ NONE = 0
+ TF_SAVED_MODEL = 1
+ KERAS_MODEL = 2
+ TF_CONCRETE_FUNCTIONS = 3
+ TF_GRAPH_DEF = 4
+ TF_SESSION = 5
+ JAX = 6
+ PYTORCH = 7
+
+
+class ModelOptimizationMode(object):
+ PTQ_FLOAT16 = 1001
+ PTQ_DYNAMIC_RANGE = 1002
+ PTQ_FULL_INTEGER = 1003
+ PTQ_INT16 = 1004
+ QUANTIZATION_AWARE_TRAINING = 2000
+ RANDOM_SPARSITY = 3001
+ BLOCK_SPARSITY = 3002
+ STRUCTURED_SPARSITY = 3003
+
+
+class Environment(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = Environment()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsEnvironment(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ # Environment
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # Environment
+ def TensorflowVersion(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # Environment
+ def ApiVersion(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+ # Environment
+ def ModelType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return 0
+
+ # Environment
+ def ModelHash(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
+ return 0
+
+def EnvironmentStart(builder):
+ builder.StartObject(4)
+
+def EnvironmentAddTensorflowVersion(builder, tensorflowVersion):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensorflowVersion), 0)
+
+def EnvironmentAddApiVersion(builder, apiVersion):
+ builder.PrependUint32Slot(1, apiVersion, 0)
+
+def EnvironmentAddModelType(builder, modelType):
+ builder.PrependInt32Slot(2, modelType, 0)
+
+def EnvironmentAddModelHash(builder, modelHash):
+ builder.PrependUint64Slot(3, modelHash, 0)
+
+def EnvironmentEnd(builder):
+ return builder.EndObject()
+
+
+
+class EnvironmentT(object):
+
+ # EnvironmentT
+ def __init__(self):
+ self.tensorflowVersion = None # type: str
+ self.apiVersion = 0 # type: int
+ self.modelType = 0 # type: int
+ self.modelHash = 0 # type: int
+
+ @classmethod
+ def InitFromBuf(cls, buf, pos):
+ environment = Environment()
+ environment.Init(buf, pos)
+ return cls.InitFromObj(environment)
+
+ @classmethod
+ def InitFromPackedBuf(cls, buf, pos=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+ return cls.InitFromBuf(buf, pos+n)
+
+ @classmethod
+ def InitFromObj(cls, environment):
+ x = EnvironmentT()
+ x._UnPack(environment)
+ return x
+
+ # EnvironmentT
+ def _UnPack(self, environment):
+ if environment is None:
+ return
+ self.tensorflowVersion = environment.TensorflowVersion()
+ self.apiVersion = environment.ApiVersion()
+ self.modelType = environment.ModelType()
+ self.modelHash = environment.ModelHash()
+
+ # EnvironmentT
+ def Pack(self, builder):
+ if self.tensorflowVersion is not None:
+ tensorflowVersion = builder.CreateString(self.tensorflowVersion)
+ EnvironmentStart(builder)
+ if self.tensorflowVersion is not None:
+ EnvironmentAddTensorflowVersion(builder, tensorflowVersion)
+ EnvironmentAddApiVersion(builder, self.apiVersion)
+ EnvironmentAddModelType(builder, self.modelType)
+ EnvironmentAddModelHash(builder, self.modelHash)
+ environment = EnvironmentEnd(builder)
+ return environment
+
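+# Illustrative only (not part of the generated output): round-tripping the
+# object API above, assuming the flatbuffers runtime is installed.
+#
+#   builder = flatbuffers.Builder(0)
+#   env = EnvironmentT()
+#   env.tensorflowVersion = "2.12.0"
+#   env.modelType = ModelType.KERAS_MODEL
+#   builder.Finish(env.Pack(builder))
+#   decoded = EnvironmentT.InitFromPackedBuf(builder.Output(), 0)
+#   assert decoded.modelType == ModelType.KERAS_MODEL
+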
+
+class SparsityBlockSize(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = SparsityBlockSize()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsSparsityBlockSize(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ # SparsityBlockSize
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # SparsityBlockSize
+ def Values(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # SparsityBlockSize
+ def ValuesAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o)
+ return 0
+
+ # SparsityBlockSize
+ def ValuesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # SparsityBlockSize
+ def ValuesIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ return o == 0
+
+def SparsityBlockSizeStart(builder):
+ builder.StartObject(1)
+
+def SparsityBlockSizeAddValues(builder, values):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0)
+
+def SparsityBlockSizeStartValuesVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def SparsityBlockSizeEnd(builder):
+ return builder.EndObject()
+
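+# Illustrative only (not part of the generated output): reading a serialized
+# table directly through the accessor API above, e.g.:
+#
+#   block = SparsityBlockSize.GetRootAs(serialized_bytes, 0)
+#   sizes = [block.Values(i) for i in range(block.ValuesLength())]
+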
+
+try:
+ from typing import List
+except:
+ pass
+
+class SparsityBlockSizeT(object):
+
+ # SparsityBlockSizeT
+ def __init__(self):
+ self.values = None # type: List[int]
+
+ @classmethod
+ def InitFromBuf(cls, buf, pos):
+ sparsityBlockSize = SparsityBlockSize()
+ sparsityBlockSize.Init(buf, pos)
+ return cls.InitFromObj(sparsityBlockSize)
+
+ @classmethod
+ def InitFromPackedBuf(cls, buf, pos=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+ return cls.InitFromBuf(buf, pos+n)
+
+ @classmethod
+ def InitFromObj(cls, sparsityBlockSize):
+ x = SparsityBlockSizeT()
+ x._UnPack(sparsityBlockSize)
+ return x
+
+ # SparsityBlockSizeT
+ def _UnPack(self, sparsityBlockSize):
+ if sparsityBlockSize is None:
+ return
+ if not sparsityBlockSize.ValuesIsNone():
+ if np is None:
+ self.values = []
+ for i in range(sparsityBlockSize.ValuesLength()):
+ self.values.append(sparsityBlockSize.Values(i))
+ else:
+ self.values = sparsityBlockSize.ValuesAsNumpy()
+
+ # SparsityBlockSizeT
+ def Pack(self, builder):
+ if self.values is not None:
+ if np is not None and type(self.values) is np.ndarray:
+ values = builder.CreateNumpyVector(self.values)
+ else:
+ SparsityBlockSizeStartValuesVector(builder, len(self.values))
+ for i in reversed(range(len(self.values))):
+ builder.PrependUint32(self.values[i])
+ values = builder.EndVector()
+ SparsityBlockSizeStart(builder)
+ if self.values is not None:
+ SparsityBlockSizeAddValues(builder, values)
+ sparsityBlockSize = SparsityBlockSizeEnd(builder)
+ return sparsityBlockSize
+
+
+class ConversionOptions(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ConversionOptions()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsConversionOptions(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ # ConversionOptions
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ConversionOptions
+ def ModelOptimizationModes(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return 0
+
+ # ConversionOptions
+ def ModelOptimizationModesAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return 0
+
+ # ConversionOptions
+ def ModelOptimizationModesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # ConversionOptions
+ def ModelOptimizationModesIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ return o == 0
+
+ # ConversionOptions
+ def AllowCustomOps(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # ConversionOptions
+ def EnableSelectTfOps(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # ConversionOptions
+ def ForceSelectTfOps(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # ConversionOptions
+ def SparsityBlockSizes(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ obj = SparsityBlockSize()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # ConversionOptions
+ def SparsityBlockSizesLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # ConversionOptions
+ def SparsityBlockSizesIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ return o == 0
+
+def ConversionOptionsStart(builder):
+ builder.StartObject(5)
+
+def ConversionOptionsAddModelOptimizationModes(builder, modelOptimizationModes):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(modelOptimizationModes), 0)
+
+def ConversionOptionsStartModelOptimizationModesVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def ConversionOptionsAddAllowCustomOps(builder, allowCustomOps):
+ builder.PrependBoolSlot(1, allowCustomOps, 0)
+
+def ConversionOptionsAddEnableSelectTfOps(builder, enableSelectTfOps):
+ builder.PrependBoolSlot(2, enableSelectTfOps, 0)
+
+def ConversionOptionsAddForceSelectTfOps(builder, forceSelectTfOps):
+ builder.PrependBoolSlot(3, forceSelectTfOps, 0)
+
+def ConversionOptionsAddSparsityBlockSizes(builder, sparsityBlockSizes):
+ builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(sparsityBlockSizes), 0)
+
+def ConversionOptionsStartSparsityBlockSizesVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def ConversionOptionsEnd(builder):
+ return builder.EndObject()
+
+
+try:
+ from typing import List
+except ImportError:
+ pass
+
+class ConversionOptionsT(object):
+
+ # ConversionOptionsT
+ def __init__(self):
+ self.modelOptimizationModes = None # type: List[int]
+ self.allowCustomOps = False # type: bool
+ self.enableSelectTfOps = False # type: bool
+ self.forceSelectTfOps = False # type: bool
+ self.sparsityBlockSizes = None # type: List[SparsityBlockSizeT]
+
+ @classmethod
+ def InitFromBuf(cls, buf, pos):
+ conversionOptions = ConversionOptions()
+ conversionOptions.Init(buf, pos)
+ return cls.InitFromObj(conversionOptions)
+
+ @classmethod
+ def InitFromPackedBuf(cls, buf, pos=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+ return cls.InitFromBuf(buf, pos+n)
+
+ @classmethod
+ def InitFromObj(cls, conversionOptions):
+ x = ConversionOptionsT()
+ x._UnPack(conversionOptions)
+ return x
+
+ # ConversionOptionsT
+ def _UnPack(self, conversionOptions):
+ if conversionOptions is None:
+ return
+ if not conversionOptions.ModelOptimizationModesIsNone():
+ if np is None:
+ self.modelOptimizationModes = []
+ for i in range(conversionOptions.ModelOptimizationModesLength()):
+ self.modelOptimizationModes.append(conversionOptions.ModelOptimizationModes(i))
+ else:
+ self.modelOptimizationModes = conversionOptions.ModelOptimizationModesAsNumpy()
+ self.allowCustomOps = conversionOptions.AllowCustomOps()
+ self.enableSelectTfOps = conversionOptions.EnableSelectTfOps()
+ self.forceSelectTfOps = conversionOptions.ForceSelectTfOps()
+ if not conversionOptions.SparsityBlockSizesIsNone():
+ self.sparsityBlockSizes = []
+ for i in range(conversionOptions.SparsityBlockSizesLength()):
+ if conversionOptions.SparsityBlockSizes(i) is None:
+ self.sparsityBlockSizes.append(None)
+ else:
+ sparsityBlockSize_ = SparsityBlockSizeT.InitFromObj(conversionOptions.SparsityBlockSizes(i))
+ self.sparsityBlockSizes.append(sparsityBlockSize_)
+
+ # ConversionOptionsT
+ def Pack(self, builder):
+ if self.modelOptimizationModes is not None:
+ if np is not None and type(self.modelOptimizationModes) is np.ndarray:
+ modelOptimizationModes = builder.CreateNumpyVector(self.modelOptimizationModes)
+ else:
+ ConversionOptionsStartModelOptimizationModesVector(builder, len(self.modelOptimizationModes))
+ for i in reversed(range(len(self.modelOptimizationModes))):
+ builder.PrependInt32(self.modelOptimizationModes[i])
+ modelOptimizationModes = builder.EndVector()
+ if self.sparsityBlockSizes is not None:
+ sparsityBlockSizeslist = []
+ for i in range(len(self.sparsityBlockSizes)):
+ sparsityBlockSizeslist.append(self.sparsityBlockSizes[i].Pack(builder))
+ ConversionOptionsStartSparsityBlockSizesVector(builder, len(self.sparsityBlockSizes))
+ for i in reversed(range(len(self.sparsityBlockSizes))):
+ builder.PrependUOffsetTRelative(sparsityBlockSizeslist[i])
+ sparsityBlockSizes = builder.EndVector()
+ ConversionOptionsStart(builder)
+ if self.modelOptimizationModes is not None:
+ ConversionOptionsAddModelOptimizationModes(builder, modelOptimizationModes)
+ ConversionOptionsAddAllowCustomOps(builder, self.allowCustomOps)
+ ConversionOptionsAddEnableSelectTfOps(builder, self.enableSelectTfOps)
+ ConversionOptionsAddForceSelectTfOps(builder, self.forceSelectTfOps)
+ if self.sparsityBlockSizes is not None:
+ ConversionOptionsAddSparsityBlockSizes(builder, sparsityBlockSizes)
+ conversionOptions = ConversionOptionsEnd(builder)
+ return conversionOptions
+
+
+class ConversionMetadata(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = ConversionMetadata()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsConversionMetadata(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ # ConversionMetadata
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # ConversionMetadata
+ def Environment(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ x = self._tab.Indirect(o + self._tab.Pos)
+ obj = Environment()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # ConversionMetadata
+ def Options(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Indirect(o + self._tab.Pos)
+ obj = ConversionOptions()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+def ConversionMetadataStart(builder):
+ builder.StartObject(2)
+
+def ConversionMetadataAddEnvironment(builder, environment):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(environment), 0)
+
+def ConversionMetadataAddOptions(builder, options):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(options), 0)
+
+def ConversionMetadataEnd(builder):
+ return builder.EndObject()
+
+
+try:
+ from typing import Optional
+except ImportError:
+ pass
+
+class ConversionMetadataT(object):
+
+ # ConversionMetadataT
+ def __init__(self):
+ self.environment = None # type: Optional[EnvironmentT]
+ self.options = None # type: Optional[ConversionOptionsT]
+
+ @classmethod
+ def InitFromBuf(cls, buf, pos):
+ conversionMetadata = ConversionMetadata()
+ conversionMetadata.Init(buf, pos)
+ return cls.InitFromObj(conversionMetadata)
+
+ @classmethod
+ def InitFromPackedBuf(cls, buf, pos=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
+ return cls.InitFromBuf(buf, pos+n)
+
+ @classmethod
+ def InitFromObj(cls, conversionMetadata):
+ x = ConversionMetadataT()
+ x._UnPack(conversionMetadata)
+ return x
+
+ # ConversionMetadataT
+ def _UnPack(self, conversionMetadata):
+ if conversionMetadata is None:
+ return
+ if conversionMetadata.Environment() is not None:
+ self.environment = EnvironmentT.InitFromObj(conversionMetadata.Environment())
+ if conversionMetadata.Options() is not None:
+ self.options = ConversionOptionsT.InitFromObj(conversionMetadata.Options())
+
+ # ConversionMetadataT
+ def Pack(self, builder):
+ if self.environment is not None:
+ environment = self.environment.Pack(builder)
+ if self.options is not None:
+ options = self.options.Pack(builder)
+ ConversionMetadataStart(builder)
+ if self.environment is not None:
+ ConversionMetadataAddEnvironment(builder, environment)
+ if self.options is not None:
+ ConversionMetadataAddOptions(builder, options)
+ conversionMetadata = ConversionMetadataEnd(builder)
+ return conversionMetadata
+
+
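+# --- Illustrative usage (editor's sketch, not part of the generated schema) ---
+# A minimal round trip through the object API above, assuming the
+# `flatbuffers` package is available and the optional environment is unset:
+#
+#   import flatbuffers
+#   options = ConversionOptionsT()
+#   options.allowCustomOps = True
+#   metadata = ConversionMetadataT()
+#   metadata.options = options
+#   builder = flatbuffers.Builder(0)
+#   builder.Finish(metadata.Pack(builder))
+#   parsed = ConversionMetadataT.InitFromPackedBuf(builder.Output(), 0)
+#   assert parsed.options.allowCustomOps is True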
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f123698e7ab1851904bc8344110628ad205b3f4
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py
@@ -0,0 +1,219 @@
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities for collecting TFLite metrics."""
+
+import collections
+import enum
+import functools
+from typing import Text
+
+from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2
+from tensorflow.lite.python.metrics import metrics
+
+
+class Component(enum.Enum):
+ """Enum class defining name of the converter components."""
+ # Validate the given input and prepare and optimize the TensorFlow model.
+ PREPARE_TF_MODEL = "PREPARE_TF_MODEL"
+
+ # Convert to TFLite model format.
+ CONVERT_TF_TO_TFLITE_MODEL = "CONVERT_TF_TO_TFLITE_MODEL"
+
+ # Run quantization and sparsification.
+ OPTIMIZE_TFLITE_MODEL = "OPTIMIZE_TFLITE_MODEL"
+
+
+SubComponentItem = collections.namedtuple("SubComponentItem",
+ ["name", "component"])
+
+
+class SubComponent(SubComponentItem, enum.Enum):
+ """Enum class defining name of the converter subcomponents.
+
+ This enum only defines the subcomponents in Python; there might be more
+ subcomponents defined in C++.
+ """
+
+ def __str__(self):
+ return self.value.name
+
+ @property
+ def name(self):
+ return self.value.name
+
+ @property
+ def component(self):
+ return self.value.component
+
+ # The subcomponent name is unspecified.
+ UNSPECIFIED = SubComponentItem("UNSPECIFIED", None)
+
+ # Validate the given input and parameters.
+ VALIDATE_INPUTS = SubComponentItem("VALIDATE_INPUTS",
+ Component.PREPARE_TF_MODEL)
+
+ # Load GraphDef from SavedModel.
+ LOAD_SAVED_MODEL = SubComponentItem("LOAD_SAVED_MODEL",
+ Component.PREPARE_TF_MODEL)
+
+ # Convert a SavedModel to frozen graph.
+ FREEZE_SAVED_MODEL = SubComponentItem("FREEZE_SAVED_MODEL",
+ Component.PREPARE_TF_MODEL)
+
+ # Save a Keras model to SavedModel.
+ CONVERT_KERAS_TO_SAVED_MODEL = SubComponentItem(
+ "CONVERT_KERAS_TO_SAVED_MODEL", Component.PREPARE_TF_MODEL)
+
+ # Save Concrete functions to SavedModel.
+ CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL = SubComponentItem(
+ "CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL", Component.PREPARE_TF_MODEL)
+
+ # Convert a Keras model to a frozen graph.
+ FREEZE_KERAS_MODEL = SubComponentItem("FREEZE_KERAS_MODEL",
+ Component.PREPARE_TF_MODEL)
+
+ # Replace all the variables with constants in a ConcreteFunction.
+ FREEZE_CONCRETE_FUNCTION = SubComponentItem("FREEZE_CONCRETE_FUNCTION",
+ Component.PREPARE_TF_MODEL)
+
+ # Run grappler optimization.
+ OPTIMIZE_TF_MODEL = SubComponentItem("OPTIMIZE_TF_MODEL",
+ Component.PREPARE_TF_MODEL)
+
+ # Convert using the old TOCO converter.
+ CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER = SubComponentItem(
+ "CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER",
+ Component.CONVERT_TF_TO_TFLITE_MODEL)
+
+ # Convert a GraphDef to TFLite model.
+ CONVERT_GRAPHDEF = SubComponentItem("CONVERT_GRAPHDEF",
+ Component.CONVERT_TF_TO_TFLITE_MODEL)
+
+ # Convert a SavedModel to TFLite model.
+ CONVERT_SAVED_MODEL = SubComponentItem("CONVERT_SAVED_MODEL",
+ Component.CONVERT_TF_TO_TFLITE_MODEL)
+
+ # Convert a Jax HLO to TFLite model.
+ CONVERT_JAX_HLO = SubComponentItem("CONVERT_JAX_HLO",
+ Component.CONVERT_TF_TO_TFLITE_MODEL)
+
+ # Do quantization by the deprecated quantizer.
+ QUANTIZE_USING_DEPRECATED_QUANTIZER = SubComponentItem(
+ "QUANTIZE_USING_DEPRECATED_QUANTIZER", Component.OPTIMIZE_TFLITE_MODEL)
+
+ # Do calibration.
+ CALIBRATE = SubComponentItem("CALIBRATE", Component.OPTIMIZE_TFLITE_MODEL)
+
+ # Do quantization by MLIR.
+ QUANTIZE = SubComponentItem("QUANTIZE", Component.OPTIMIZE_TFLITE_MODEL)
+
+ # Do sparsification by MLIR.
+ SPARSIFY = SubComponentItem("SPARSIFY", Component.OPTIMIZE_TFLITE_MODEL)
+
+
+class ConverterError(Exception):
+ """Raised when an error occurs during model conversion."""
+
+ def __init__(self, message):
+ super(ConverterError, self).__init__(message)
+ self.errors = []
+ self._parse_error_message(message)
+
+ def append_error(self,
+ error_data: converter_error_data_pb2.ConverterErrorData):
+ self.errors.append(error_data)
+
+ def _parse_error_message(self, message):
+ """If the message matches a pattern, assigns the associated error code.
+
+ It is difficult to assign an error code to some errors on the MLIR side,
+ e.g. errors thrown by components other than TFLite or errors not raised
+ through mlir::emitError. This function tries to detect them by the error
+ message and assigns the corresponding error code.
+
+ Args:
+ message: The error message of this exception.
+ """
+ error_code_mapping = {
+ "Failed to functionalize Control Flow V1 ops. Consider using Control "
+ "Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/"
+ "tf/compat/v1/enable_control_flow_v2.":
+ converter_error_data_pb2.ConverterErrorData
+ .ERROR_UNSUPPORTED_CONTROL_FLOW_V1,
+ }
+ for pattern, error_code in error_code_mapping.items():
+ if pattern in message:
+ error_data = converter_error_data_pb2.ConverterErrorData()
+ error_data.error_message = message
+ error_data.error_code = error_code
+ self.append_error(error_data)
+ return
+
+
+def convert_phase(component, subcomponent=SubComponent.UNSPECIFIED):
+ """The decorator to identify converter component and subcomponent.
+
+ Args:
+ component: Converter component name.
+ subcomponent: Converter subcomponent name.
+
+ Returns:
+ Forwards the result from the wrapped function.
+
+ Raises:
+ ValueError: if the component or subcomponent name is not valid.
+ """
+ if component not in Component:
+ raise ValueError("Given component name not found")
+ if subcomponent not in SubComponent:
+ raise ValueError("Given subcomponent name not found")
+ if (subcomponent != SubComponent.UNSPECIFIED and
+ subcomponent.component != component):
+ raise ValueError("component and subcomponent name don't match")
+
+ def report_error(error_data: converter_error_data_pb2.ConverterErrorData):
+ # Always overwrites the component information, but only overwrites the
+ # subcomponent if it is not available.
+ error_data.component = component.value
+ if not error_data.subcomponent:
+ error_data.subcomponent = subcomponent.name
+ tflite_metrics = metrics.TFLiteConverterMetrics()
+ tflite_metrics.set_converter_error(error_data)
+
+ def report_error_message(error_message: Text):
+ error_data = converter_error_data_pb2.ConverterErrorData()
+ error_data.error_message = error_message
+ report_error(error_data)
+
+ def actual_decorator(func):
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except ConverterError as converter_error:
+ if converter_error.errors:
+ for error_data in converter_error.errors:
+ report_error(error_data)
+ else:
+ report_error_message(str(converter_error))
+ raise converter_error from None # Re-throws the exception.
+ except Exception as error:
+ report_error_message(str(error))
+ raise error from None # Re-throws the exception.
+
+ return wrapper
+
+ return actual_decorator
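+
+
+# --- Illustrative usage (editor's sketch, not part of the original file) ---
+# How the decorator above is meant to be applied; `_calibrate_model` is a
+# hypothetical conversion step:
+#
+#   @convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE)
+#   def _calibrate_model(model):
+#     ...  # a ConverterError raised here is reported to the TFLite metrics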
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..e898a47318d38a388b8ca661bef89dda53222593
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py
@@ -0,0 +1,45 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Schema utilities to get builtin code from operator code."""
+
+from tensorflow.python.util import all_util
+
+
+def get_builtin_code_from_operator_code(opcode):
+ """Return the builtin code of the given operator code.
+
+ This method is introduced to resolve the op builtin code shortage
+ problem. New builtin operators are assigned to the extended builtin code
+ field in the flatbuffer schema. This method helps hide the builtin code
+ details.
+
+ Args:
+ opcode: Operator code.
+
+ Returns:
+ The builtin code of the given operator code.
+ """
+ # Access BuiltinCode() method first if available.
+ if hasattr(opcode, 'BuiltinCode') and callable(opcode.BuiltinCode):
+ return max(opcode.BuiltinCode(), opcode.DeprecatedBuiltinCode())
+
+ return max(opcode.builtinCode, opcode.deprecatedBuiltinCode)
+
+
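+# --- Illustrative usage (editor's sketch, not part of the original file) ---
+# `op_code` is assumed to be an OperatorCode from the generated TFLite
+# schema, in either its table-API or object-API form:
+#
+#   builtin_code = get_builtin_code_from_operator_code(op_code)
+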
+_allowed_symbols = [
+ 'get_builtin_code_from_operator_code',
+]
+
+all_util.remove_undocumented(__name__, _allowed_symbols)
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..653a23ded0c8dcfe52b4c5a56ded6c6dc8c1f9df
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py
@@ -0,0 +1,696 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Python command line interface for converting TF models to TFLite models."""
+
+import argparse
+import os
+import sys
+import warnings
+
+from absl import app
+import tensorflow as tf
+
+from tensorflow.lite.python import lite
+from tensorflow.lite.python.convert import register_custom_opdefs
+from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
+from tensorflow.lite.toco.logging import gen_html
+from tensorflow.python import tf2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.platform import gfile
+from tensorflow.python.util import keras_deps
+
+# Needed to enable TF2 by default.
+
+_ = tf.keras.models.save_model # ensure necessary imports are executed
+
+
+def _parse_array(values, type_fn=str):
+ if values is not None:
+ return [type_fn(val) for val in values.split(",") if val]
+ return None
+
+
+def _parse_set(values):
+ if values is not None:
+ return set([item for item in values.split(",") if item])
+ return None
+
+
+def _parse_inference_type(value, flag):
+ """Converts the inference type to the value of the constant.
+
+ Args:
+ value: str representing the inference type.
+ flag: str representing the flag name.
+
+ Returns:
+ tf.dtype.
+
+ Raises:
+ ValueError: Unsupported value.
+ """
+ if value == "FLOAT":
+ return dtypes.float32
+ if value == "INT8":
+ return dtypes.int8
+ if value == "UINT8" or value == "QUANTIZED_UINT8":
+ return dtypes.uint8
+ raise ValueError(
+ "Unsupported value for `{}` flag. Expected FLOAT, INT8, UINT8, or "
+ "QUANTIZED_UINT8 instead got {}.".format(flag, value))
+
+
+class _ParseBooleanFlag(argparse.Action):
+ """Helper class to parse boolean flag that optionally accepts truth value."""
+
+ def __init__(self, option_strings, dest, nargs=None, **kwargs):
+ if nargs != "?":
+ # This should never happen. This class is only used below with
+ # nargs="?".
+ raise ValueError(
+ "This parser only supports nargs='?' (0 or 1 additional arguments)")
+ super(_ParseBooleanFlag, self).__init__(
+ option_strings, dest, nargs=nargs, **kwargs)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ if values is None:
+ # Handling `--boolean_flag`.
+ # Without additional arguments, it implies true.
+ flag_value = True
+ elif values.lower() == "true":
+ # Handling `--boolean_flag=true`.
+ # (Case insensitive after the equal sign)
+ flag_value = True
+ elif values.lower() == "false":
+ # Handling `--boolean_flag=false`.
+ # (Case insensitive after the equal sign)
+ flag_value = False
+ else:
+ raise ValueError("Invalid argument to --{}. Must use flag alone,"
+ " or specify true/false.".format(self.dest))
+ setattr(namespace, self.dest, flag_value)
+
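+# --- Illustrative behavior (editor's sketch, not part of the original file) ---
+# With parser.add_argument("--allow_custom_ops", action=_ParseBooleanFlag,
+# nargs="?"), the flag accepts all of the following spellings:
+#
+#   --allow_custom_ops        -> True
+#   --allow_custom_ops=TRUE   -> True (case insensitive)
+#   --allow_custom_ops=false  -> False
+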
+
+def _get_tflite_converter(flags):
+ """Makes a TFLiteConverter object based on the flags provided.
+
+ Args:
+ flags: argparse.Namespace object containing TFLite flags.
+
+ Returns:
+ TFLiteConverter object.
+
+ Raises:
+ ValueError: Invalid flags.
+ """
+ # Parse input and output arrays.
+ input_arrays = _parse_array(flags.input_arrays)
+ input_shapes = None
+ if flags.input_shapes:
+ input_shapes_list = [
+ _parse_array(shape, type_fn=int)
+ for shape in flags.input_shapes.split(":")
+ ]
+ input_shapes = dict(list(zip(input_arrays, input_shapes_list)))
+ output_arrays = _parse_array(flags.output_arrays)
+
+ converter_kwargs = {
+ "input_arrays": input_arrays,
+ "input_shapes": input_shapes,
+ "output_arrays": output_arrays
+ }
+
+ # Create TFLiteConverter.
+ if flags.graph_def_file:
+ converter_fn = lite.TFLiteConverter.from_frozen_graph
+ converter_kwargs["graph_def_file"] = flags.graph_def_file
+ elif flags.saved_model_dir:
+ converter_fn = lite.TFLiteConverter.from_saved_model
+ converter_kwargs["saved_model_dir"] = flags.saved_model_dir
+ converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set)
+ converter_kwargs["signature_key"] = flags.saved_model_signature_key
+ elif flags.keras_model_file:
+ converter_fn = lite.TFLiteConverter.from_keras_model_file
+ converter_kwargs["model_file"] = flags.keras_model_file
+ else:
+ raise ValueError("--graph_def_file, --saved_model_dir, or "
+ "--keras_model_file must be specified.")
+
+ return converter_fn(**converter_kwargs)
+
+
+def _convert_tf1_model(flags):
+ """Calls function to convert the TensorFlow 1.X model into a TFLite model.
+
+ Args:
+ flags: argparse.Namespace object.
+
+ Raises:
+ ValueError: Invalid flags.
+ """
+ # Register custom opdefs before converter object creation.
+ if flags.custom_opdefs:
+ register_custom_opdefs(_parse_array(flags.custom_opdefs))
+
+ # Create converter.
+ converter = _get_tflite_converter(flags)
+ if flags.inference_type:
+ converter.inference_type = _parse_inference_type(flags.inference_type,
+ "inference_type")
+ if flags.inference_input_type:
+ converter.inference_input_type = _parse_inference_type(
+ flags.inference_input_type, "inference_input_type")
+ if flags.output_format:
+ converter.output_format = _toco_flags_pb2.FileFormat.Value(
+ flags.output_format)
+
+ if flags.mean_values and flags.std_dev_values:
+ input_arrays = converter.get_input_arrays()
+ std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)
+
+ # In quantized inference, mean_value has to be integer so that the real
+ # value 0.0 is exactly representable.
+ if converter.inference_type == dtypes.float32:
+ mean_values = _parse_array(flags.mean_values, type_fn=float)
+ else:
+ mean_values = _parse_array(flags.mean_values, type_fn=int)
+ quant_stats = list(zip(mean_values, std_dev_values))
+ if ((not flags.input_arrays and len(input_arrays) > 1) or
+ (len(input_arrays) != len(quant_stats))):
+ raise ValueError("Mismatching --input_arrays, --std_dev_values, and "
+ "--mean_values. The flags must have the same number of "
+ "items. The current input arrays are '{0}'. "
+ "--input_arrays must be present when specifying "
+ "--std_dev_values and --mean_values with multiple input "
+ "tensors in order to map between names and "
+ "values.".format(",".join(input_arrays)))
+ converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats)))
+ if (flags.default_ranges_min is not None) and (flags.default_ranges_max is
+ not None):
+ converter.default_ranges_stats = (flags.default_ranges_min,
+ flags.default_ranges_max)
+
+ if flags.drop_control_dependency:
+ converter.drop_control_dependency = flags.drop_control_dependency
+ if flags.reorder_across_fake_quant:
+ converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
+ if flags.change_concat_input_ranges:
+ converter.change_concat_input_ranges = (
+ flags.change_concat_input_ranges == "TRUE")
+
+ if flags.allow_custom_ops:
+ converter.allow_custom_ops = flags.allow_custom_ops
+
+ if flags.target_ops:
+ ops_set_options = lite.OpsSet.get_options()
+ converter.target_spec.supported_ops = set()
+ for option in flags.target_ops.split(","):
+ if option not in ops_set_options:
+ raise ValueError("Invalid value for --target_ops. Options: "
+ "{0}".format(",".join(ops_set_options)))
+ converter.target_spec.supported_ops.add(lite.OpsSet(option))
+
+ if flags.experimental_select_user_tf_ops:
+ if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops:
+ raise ValueError("--experimental_select_user_tf_ops can only be set if "
+ "--target_ops contains SELECT_TF_OPS.")
+ user_op_set = set()
+ for op_name in flags.experimental_select_user_tf_ops.split(","):
+ user_op_set.add(op_name)
+ converter.target_spec.experimental_select_user_tf_ops = list(user_op_set)
+
+ if flags.post_training_quantize:
+ converter.optimizations = [lite.Optimize.DEFAULT]
+ if converter.inference_type != dtypes.float32:
+ print("--post_training_quantize quantizes a graph of inference_type "
+ "FLOAT. Overriding inference_type to FLOAT.")
+ converter.inference_type = dtypes.float32
+
+ if flags.quantize_to_float16:
+ converter.target_spec.supported_types = [dtypes.float16]
+ if not flags.post_training_quantize:
+ print("--quantize_to_float16 will only take effect with the "
+ "--post_training_quantize flag enabled.")
+
+ if flags.dump_graphviz_dir:
+ converter.dump_graphviz_dir = flags.dump_graphviz_dir
+ if flags.dump_graphviz_video:
+ converter.dump_graphviz_video = flags.dump_graphviz_video
+ if flags.conversion_summary_dir:
+ converter.conversion_summary_dir = flags.conversion_summary_dir
+
+ converter.experimental_new_converter = flags.experimental_new_converter
+
+ if flags.experimental_new_quantizer is not None:
+ converter.experimental_new_quantizer = flags.experimental_new_quantizer
+
+ # Convert model.
+ output_data = converter.convert()
+ with gfile.GFile(flags.output_file, "wb") as f:
+ f.write(output_data)
+
+
+def _convert_tf2_model(flags):
+ """Calls function to convert the TensorFlow 2.0 model into a TFLite model.
+
+ Args:
+ flags: argparse.Namespace object.
+
+ Raises:
+ ValueError: Unsupported file format.
+ """
+ # Load the model.
+ if flags.saved_model_dir:
+ converter = lite.TFLiteConverterV2.from_saved_model(
+ flags.saved_model_dir,
+ signature_keys=_parse_array(flags.saved_model_signature_key),
+ tags=_parse_set(flags.saved_model_tag_set))
+ elif flags.keras_model_file:
+ model = keras_deps.get_load_model_function()(flags.keras_model_file)
+ converter = lite.TFLiteConverterV2.from_keras_model(model)
+
+ converter.experimental_new_converter = flags.experimental_new_converter
+
+ if flags.experimental_new_quantizer is not None:
+ converter.experimental_new_quantizer = flags.experimental_new_quantizer
+
+ # Convert the model.
+ tflite_model = converter.convert()
+ with gfile.GFile(flags.output_file, "wb") as f:
+ f.write(tflite_model)
+
+
+def _check_tf1_flags(flags, unparsed):
+ """Checks the parsed and unparsed flags to ensure they are valid in 1.X.
+
+ Raises an error if previously supported unparsed flags are found. Raises an
+ error for parsed flags that don't meet the required conditions.
+
+ Args:
+ flags: argparse.Namespace object containing TFLite flags.
+ unparsed: List of unparsed flags.
+
+ Raises:
+ ValueError: Invalid flags.
+ """
+
+ # Check unparsed flags for common mistakes based on previous TOCO.
+ def _get_message_unparsed(flag, orig_flag, new_flag):
+ if flag.startswith(orig_flag):
+ return "\n Use {0} instead of {1}".format(new_flag, orig_flag)
+ return ""
+
+ if unparsed:
+ output = ""
+ for flag in unparsed:
+ output += _get_message_unparsed(flag, "--input_file", "--graph_def_file")
+ output += _get_message_unparsed(flag, "--savedmodel_directory",
+ "--saved_model_dir")
+ output += _get_message_unparsed(flag, "--std_value", "--std_dev_values")
+ output += _get_message_unparsed(flag, "--batch_size", "--input_shapes")
+ output += _get_message_unparsed(flag, "--dump_graphviz",
+ "--dump_graphviz_dir")
+ if output:
+ raise ValueError(output)
+
+ # Check that flags are valid.
+ if flags.graph_def_file and (not flags.input_arrays or
+ not flags.output_arrays):
+ raise ValueError("--input_arrays and --output_arrays are required with "
+ "--graph_def_file")
+
+ if flags.input_shapes:
+ if not flags.input_arrays:
+ raise ValueError("--input_shapes must be used with --input_arrays")
+ if flags.input_shapes.count(":") != flags.input_arrays.count(","):
+ raise ValueError("--input_shapes and --input_arrays must have the same "
+ "number of items")
+
+ if flags.std_dev_values or flags.mean_values:
+ if bool(flags.std_dev_values) != bool(flags.mean_values):
+ raise ValueError("--std_dev_values and --mean_values must be used "
+ "together")
+ if flags.std_dev_values.count(",") != flags.mean_values.count(","):
+ raise ValueError("--std_dev_values, --mean_values must have the same "
+ "number of items")
+
+ if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):
+ raise ValueError("--default_ranges_min and --default_ranges_max must be "
+ "used together")
+
+ if flags.dump_graphviz_video and not flags.dump_graphviz_dir:
+ raise ValueError("--dump_graphviz_video must be used with "
+ "--dump_graphviz_dir")
+
+ if flags.custom_opdefs and not flags.experimental_new_converter:
+ raise ValueError("--custom_opdefs must be used with "
+ "--experimental_new_converter")
+ if flags.custom_opdefs and not flags.allow_custom_ops:
+ raise ValueError("--custom_opdefs must be used with --allow_custom_ops")
+ if (flags.experimental_select_user_tf_ops and
+ not flags.experimental_new_converter):
+ raise ValueError("--experimental_select_user_tf_ops must be used with "
+ "--experimental_new_converter")
+
+
+def _check_tf2_flags(flags):
+ """Checks the parsed and unparsed flags to ensure they are valid in 2.X.
+
+ Args:
+ flags: argparse.Namespace object containing TFLite flags.
+
+ Raises:
+ ValueError: Invalid flags.
+ """
+ if not flags.keras_model_file and not flags.saved_model_dir:
+ raise ValueError("one of the arguments --saved_model_dir "
+ "--keras_model_file is required")
+
+
+def _get_tf1_flags(parser):
+ """Returns ArgumentParser for tflite_convert for TensorFlow 1.X.
+
+ Args:
+ parser: ArgumentParser
+ """
+ # Input file flags.
+ input_file_group = parser.add_mutually_exclusive_group(required=True)
+ input_file_group.add_argument(
+ "--graph_def_file",
+ type=str,
+ help="Full filepath of file containing frozen TensorFlow GraphDef.")
+ input_file_group.add_argument(
+ "--saved_model_dir",
+ type=str,
+ help="Full filepath of directory containing the SavedModel.")
+ input_file_group.add_argument(
+ "--keras_model_file",
+ type=str,
+ help="Full filepath of HDF5 file containing tf.Keras model.")
+
+ # Model format flags.
+ parser.add_argument(
+ "--output_format",
+ type=str.upper,
+ choices=["TFLITE", "GRAPHVIZ_DOT"],
+ help="Output file format.")
+ parser.add_argument(
+ "--inference_type",
+ type=str.upper,
+ default="FLOAT",
+ help=("Target data type of real-number arrays in the output file. "
+ "Must be either FLOAT, INT8 or UINT8."))
+ parser.add_argument(
+ "--inference_input_type",
+ type=str.upper,
+ help=("Target data type of real-number input arrays. Allows for a "
+ "different type for input arrays in the case of quantization. "
+ "Must be either FLOAT, INT8 or UINT8."))
+
+ # Input and output arrays flags.
+ parser.add_argument(
+ "--input_arrays",
+ type=str,
+ help="Names of the input arrays, comma-separated.")
+ parser.add_argument(
+ "--input_shapes",
+ type=str,
+ help="Shapes corresponding to --input_arrays, colon-separated.")
+ parser.add_argument(
+ "--output_arrays",
+ type=str,
+ help="Names of the output arrays, comma-separated.")
+
+ # SavedModel related flags.
+ parser.add_argument(
+ "--saved_model_tag_set",
+ type=str,
+ help=("Comma-separated set of tags identifying the MetaGraphDef within "
+ "the SavedModel to analyze. All tags must be present. In order to "
+ "pass in an empty tag set, pass in \"\". (default \"serve\")"))
+ parser.add_argument(
+ "--saved_model_signature_key",
+ type=str,
+ help=("Key identifying the SignatureDef containing inputs and outputs. "
+ "(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)"))
+
+ # Quantization flags.
+ parser.add_argument(
+ "--std_dev_values",
+ type=str,
+ help=("Standard deviation of training data for each input tensor, "
+ "comma-separated floats. Used for quantized input tensors. "
+ "(default None)"))
+ parser.add_argument(
+ "--mean_values",
+ type=str,
+ help=("Mean of training data for each input tensor, comma-separated "
+ "floats. Used for quantized input tensors. (default None)"))
+ parser.add_argument(
+ "--default_ranges_min",
+ type=float,
+ help=("Default value for min bound of min/max range values used for all "
+ "arrays without a specified range, Intended for experimenting with "
+ "quantization via \"dummy quantization\". (default None)"))
+ parser.add_argument(
+ "--default_ranges_max",
+ type=float,
+ help=("Default value for max bound of min/max range values used for all "
+ "arrays without a specified range, Intended for experimenting with "
+ "quantization via \"dummy quantization\". (default None)"))
+ # quantize_weights is DEPRECATED.
+ parser.add_argument(
+ "--quantize_weights",
+ dest="post_training_quantize",
+ action="store_true",
+ help=argparse.SUPPRESS)
+ parser.add_argument(
+ "--post_training_quantize",
+ dest="post_training_quantize",
+ action="store_true",
+ help=(
+ "Boolean indicating whether to quantize the weights of the "
+ "converted float model. Model size will be reduced and there will "
+ "be latency improvements (at the cost of accuracy). (default False)"))
+ parser.add_argument(
+ "--quantize_to_float16",
+ dest="quantize_to_float16",
+ action="store_true",
+ help=("Boolean indicating whether to quantize weights to fp16 instead of "
+ "the default int8 when post-training quantization "
+ "(--post_training_quantize) is enabled. (default False)"))
+ # Graph manipulation flags.
+ parser.add_argument(
+ "--drop_control_dependency",
+ action="store_true",
+ help=("Boolean indicating whether to drop control dependencies silently. "
+ "This is due to TensorFlow not supporting control dependencies. "
+ "(default True)"))
+ parser.add_argument(
+ "--reorder_across_fake_quant",
+ action="store_true",
+ help=("Boolean indicating whether to reorder FakeQuant nodes in "
+ "unexpected locations. Used when the location of the FakeQuant "
+ "nodes is preventing graph transformations necessary to convert "
+ "the graph. Results in a graph that differs from the quantized "
+ "training graph, potentially causing differing arithmetic "
+ "behavior. (default False)"))
+ # Usage for this flag is --change_concat_input_ranges=true or
+ # --change_concat_input_ranges=false in order to make it clear what the flag
+ # is set to. This keeps the usage consistent with other usages of the flag
+ # where the default is different. The default value here is False.
+ parser.add_argument(
+ "--change_concat_input_ranges",
+ type=str.upper,
+ choices=["TRUE", "FALSE"],
+ help=("Boolean to change behavior of min/max ranges for inputs and "
+ "outputs of the concat operator for quantized models. Changes the "
+ "ranges of concat operator overlap when true. (default False)"))
+
+ # Permitted ops flags.
+ parser.add_argument(
+ "--allow_custom_ops",
+ action=_ParseBooleanFlag,
+ nargs="?",
+ help=("Boolean indicating whether to allow custom operations. When false "
+ "any unknown operation is an error. When true, custom ops are "
+ "created for any op that is unknown. The developer will need to "
+ "provide these to the TensorFlow Lite runtime with a custom "
+ "resolver. (default False)"))
+ parser.add_argument(
+ "--custom_opdefs",
+ type=str,
+ help=("String representing a list of custom ops OpDefs delineated with "
+ "commas that are included in the GraphDef. Required when using "
+ "custom operations with --experimental_new_converter."))
+ parser.add_argument(
+ "--target_ops",
+ type=str,
+ help=("Experimental flag, subject to change. Set of OpsSet options "
+ "indicating which converter to use. Options: {0}. One or more "
+ "option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))"
+ "".format(",".join(lite.OpsSet.get_options()))))
+ parser.add_argument(
+ "--experimental_select_user_tf_ops",
+ type=str,
+ help=("Experimental flag, subject to change. Comma separated list of "
+ "user's defined TensorFlow operators required in the runtime."))
+
+ # Logging flags.
+ parser.add_argument(
+ "--dump_graphviz_dir",
+ type=str,
+ help=("Full filepath of folder to dump the graphs at various stages of "
+ "processing GraphViz .dot files. Preferred over --output_format="
+ "GRAPHVIZ_DOT in order to keep the requirements of the output "
+ "file."))
+ parser.add_argument(
+ "--dump_graphviz_video",
+ action="store_true",
+ help=("Boolean indicating whether to dump the graph after every graph "
+ "transformation"))
+ parser.add_argument(
+ "--conversion_summary_dir",
+ type=str,
+ help=("Full filepath to store the conversion logs, which includes "
+ "graphviz of the model before/after the conversion, an HTML report "
+ "and the conversion proto buffers. This will only be generated "
+ "when passing --experimental_new_converter"))
+
+
+def _get_tf2_flags(parser):
+ """Returns ArgumentParser for tflite_convert for TensorFlow 2.0.
+
+ Args:
+ parser: ArgumentParser
+ """
+ # Input file flags.
+ input_file_group = parser.add_mutually_exclusive_group()
+ input_file_group.add_argument(
+ "--saved_model_dir",
+ type=str,
+ help="Full path of the directory containing the SavedModel.")
+ input_file_group.add_argument(
+ "--keras_model_file",
+ type=str,
+ help="Full filepath of HDF5 file containing tf.Keras model.")
+ # SavedModel related flags.
+ parser.add_argument(
+ "--saved_model_tag_set",
+ type=str,
+ help=("Comma-separated set of tags identifying the MetaGraphDef within "
+ "the SavedModel to analyze. All tags must be present. In order to "
+ "pass in an empty tag set, pass in \"\". (default \"serve\")"))
+ parser.add_argument(
+ "--saved_model_signature_key",
+ type=str,
+ help=("Key identifying the SignatureDef containing inputs and outputs. "
+ "(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)"))
+
+ # Enables 1.X converter in 2.X.
+ parser.add_argument(
+ "--enable_v1_converter",
+ action="store_true",
+ help=("Enables the TensorFlow V1 converter in 2.0"))
+
+
+def _get_parser(use_v2_converter):
+ """Returns an ArgumentParser for tflite_convert.
+
+ Args:
+ use_v2_converter: Indicates which converter to return.
+
+ Returns:
+ ArgumentParser.
+ """
+ parser = argparse.ArgumentParser(
+ description=("Command line tool to run TensorFlow Lite Converter."))
+
+ # Output file flag.
+ parser.add_argument(
+ "--output_file",
+ type=str,
+ help="Full filepath of the output file.",
+ required=True)
+
+ if use_v2_converter:
+ _get_tf2_flags(parser)
+ else:
+ _get_tf1_flags(parser)
+
+ parser.add_argument(
+ "--experimental_new_converter",
+ action=_ParseBooleanFlag,
+ nargs="?",
+ default=True,
+ help=("Experimental flag, subject to change. Enables MLIR-based "
+ "conversion instead of TOCO conversion. (default True)"))
+
+ parser.add_argument(
+ "--experimental_new_quantizer",
+ action=_ParseBooleanFlag,
+ nargs="?",
+ help=("Experimental flag, subject to change. Enables MLIR-based "
+ "quantizer instead of flatbuffer conversion. (default True)"))
+ return parser
+
+
+def run_main(_):
+ """Main in tflite_convert.py."""
+ use_v2_converter = tf2.enabled()
+ parser = _get_parser(use_v2_converter)
+ tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])
+
+ # If the user is running TensorFlow 2.X but has passed in enable_v1_converter
+ # then parse the flags again with the 1.X converter flags.
+ if tf2.enabled() and tflite_flags.enable_v1_converter:
+ use_v2_converter = False
+ parser = _get_parser(use_v2_converter)
+ tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])
+
+ # Checks if the flags are valid.
+ try:
+ if use_v2_converter:
+ _check_tf2_flags(tflite_flags)
+ else:
+ _check_tf1_flags(tflite_flags, unparsed)
+ except ValueError as e:
+ parser.print_usage()
+ file_name = os.path.basename(sys.argv[0])
+ sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e)))
+ sys.exit(1)
+
+ # Convert the model according to the user provided flag.
+ if use_v2_converter:
+ _convert_tf2_model(tflite_flags)
+ else:
+ try:
+ _convert_tf1_model(tflite_flags)
+ finally:
+ if tflite_flags.conversion_summary_dir:
+ if tflite_flags.experimental_new_converter:
+ gen_html.gen_conversion_log_html(tflite_flags.conversion_summary_dir,
+ tflite_flags.post_training_quantize,
+ tflite_flags.output_file)
+ else:
+ warnings.warn(
+ "Conversion summary will only be generated when enabling"
+ " the new converter via --experimental_new_converter. ")
+
+
+def main():
+ app.run(main=run_main, argv=sys.argv[:1])
+
+
+if __name__ == "__main__":
+ main()
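+
+
+# --- Illustrative CLI usage (editor's sketch, not part of the original file) ---
+# Converting a SavedModel with the TF2 converter; both paths are hypothetical:
+#
+#   tflite_convert \
+#     --saved_model_dir=/tmp/saved_model \
+#     --output_file=/tmp/model.tflite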
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0692655c3f127804bfa10fe96d746531569998a
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py
@@ -0,0 +1,1177 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Functions used by multiple converter files."""
+
+import copy
+import datetime
+import sys
+
+from absl import logging
+import flatbuffers
+import numpy as np
+
+from tensorflow.core.protobuf import config_pb2 as _config_pb2
+from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
+from tensorflow.lite.python import conversion_metadata_schema_py_generated as conversion_metadata_fb
+from tensorflow.lite.python import schema_py_generated as schema_fb
+from tensorflow.lite.python import schema_util
+from tensorflow.lite.python import tflite_keras_util as _tflite_keras_util
+from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
+from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
+from tensorflow.lite.tools import flatbuffer_utils
+from tensorflow.python.eager import function
+from tensorflow.python.framework import convert_to_constants as _convert_to_constants
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import error_interpolation as _error_interpolation
+from tensorflow.python.grappler import tf_optimizer
+from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph
+
+# The field name of conversion metadata in the flatbuffer file.
+CONVERSION_METADATA_FIELD_NAME = "CONVERSION_METADATA"
+
+# Keras functions used by TFLite
+model_input_signature = _tflite_keras_util.model_input_signature
+trace_model_call = _tflite_keras_util.trace_model_call
+get_save_spec = _tflite_keras_util.get_save_spec
+
+# Jax functions used by TFLite
+# pylint: disable=g-import-not-at-top
+# pylint: disable=unused-import
+try:
+ from jax import jit as _jit
+except ImportError:
+ _jit = None
+# pylint: enable=g-import-not-at-top
+# pylint: enable=unused-import
+
+# Defined as per TFLite schema
+_MAP_TFLITE_ENUM_TO_TF_TYPES = {
+ 0: dtypes.float32,
+ 1: dtypes.float16,
+ 2: dtypes.int32,
+ 3: dtypes.uint8,
+ 4: dtypes.int64,
+ 5: dtypes.string,
+ 6: dtypes.bool,
+ 7: dtypes.int16,
+ 8: dtypes.complex64,
+ 9: dtypes.int8,
+ 10: dtypes.float64,
+ 11: dtypes.complex128,
+ 16: dtypes.uint32,
+}
+
+_TFLITE_FILE_IDENTIFIER = b"TFL3"
+
+_MAP_QUANT_TO_IO_TYPES = {
+ dtypes.int8: {dtypes.int8, dtypes.uint8},
+ dtypes.int16: {dtypes.int16},
+}
+
+
+def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):
+ """Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).
+
+ Args:
+ tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)
+
+ Raises:
+ ValueError: If an invalid tflite enum type is provided.
+
+ Returns:
+ tf type (eg: tf.float32)
+ """
+ tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)
+ if tf_type is None:
+ raise ValueError(
+ "Unsupported enum {}. The valid map of enum to tf types is : {}"
+ .format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))
+ return tf_type
+
+
+def get_tf_type_name(tf_type):
+ """Converts tf.dtype (eg: tf.float32) to str (eg: "tf.float32")."""
+ return "tf." + tf_type.name if tf_type else None
+
+
+def get_tensor_name(tensor):
+ """Returns name of the input tensor.
+
+ Args:
+ tensor: tf.Tensor
+
+ Returns:
+ str
+ """
+ parts = tensor.name.split(":")
+ if len(parts) > 2:
+ raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
+ len(parts) - 1))
+
+ # To be consistent with the tensor naming scheme in tensorflow, we need to
+ # drop the ':0' suffix for the first output tensor.
+ if len(parts) > 1 and parts[1] != "0":
+ return tensor.name
+ return parts[0]
+
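+# Example (editor's note): a tensor named "foo:0" maps to "foo", while
+# "foo:1" is kept as-is, since only the first output's ":0" suffix is
+# dropped.
+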
+
+def get_tensors_from_tensor_names(graph, tensor_names):
+ """Gets the Tensors associated with the `tensor_names` in the provided graph.
+
+ Args:
+ graph: TensorFlow Graph.
+ tensor_names: List of strings that represent names of tensors in the graph.
+
+ Returns:
+ A list of Tensor objects in the same order the names are provided.
+
+ Raises:
+ ValueError:
+ tensor_names contains an invalid tensor name.
+ """
+ # Get the list of all of the tensors.
+ tensor_name_to_tensor = {}
+ for op in graph.get_operations():
+ for tensor in op.values():
+ tensor_name_to_tensor[get_tensor_name(tensor)] = tensor
+
+ # Get the tensors associated with tensor_names.
+ tensors = []
+ invalid_tensors = []
+ for name in tensor_names:
+ if not isinstance(name, str):
+ raise ValueError("Invalid type for a tensor name in the provided graph. "
+ "Expected type for a tensor name is 'str', instead got "
+ "type '{}' for tensor name '{}'".format(
+ type(name), name))
+
+ tensor = tensor_name_to_tensor.get(name)
+ if tensor is None:
+ invalid_tensors.append(name)
+ else:
+ tensors.append(tensor)
+
+ # Throw ValueError if any user input names are not valid tensors.
+ if invalid_tensors:
+ raise ValueError("Invalid tensors '{}' were found.".format(
+ ",".join(invalid_tensors)))
+ return tensors
+
+
+def set_tensor_shapes(tensors, shapes):
+ """Sets Tensor shape for each tensor if the shape is defined.
+
+ Args:
+ tensors: List of TensorFlow tensors (tensor.Tensor).
+ shapes: Dict of strings representing input tensor names to list of
+ integers representing input shapes (e.g., {"foo": [1, 16, 16, 3]}).
+
+ Raises:
+ ValueError:
+ `shapes` contains an invalid tensor.
+ `shapes` contains an invalid shape for a valid tensor.
+ """
+ if shapes:
+ tensor_names_to_tensor = {
+ get_tensor_name(tensor): tensor for tensor in tensors
+ }
+ for name, shape in shapes.items():
+ if name not in tensor_names_to_tensor:
+ raise ValueError("Invalid tensor \'{}\' found in tensor shapes "
+ "map.".format(name))
+ if shape is not None:
+ tensor = tensor_names_to_tensor[name]
+ try:
+ tensor.set_shape(shape)
+ except ValueError as error:
+ message = ("The shape of tensor '{0}' cannot be changed from {1} to "
+ "{2}. {3}".format(name, tensor.shape, shape, str(error)))
+ raise ValueError(message)
+
+
+def get_grappler_config(optimizers_list):
+ """Creates a tf.compat.v1.ConfigProto for configuring Grappler.
+
+ Args:
+ optimizers_list: List of strings that represents the list of optimizers.
+
+ Returns:
+ tf.ConfigProto.
+ """
+ config = _config_pb2.ConfigProto()
+ rewrite_options = config.graph_options.rewrite_options
+ for optimizer in optimizers_list:
+ rewrite_options.optimizers.append(optimizer)
+ return config
+
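+# --- Illustrative usage (editor's sketch, not part of the original file) ---
+# A config that inlines functions and folds constants; the optimizer names
+# follow Grappler's rewriter naming (assumed here):
+#
+#   config = get_grappler_config(["function", "constfold"])
+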
+
+def run_graph_optimizations(graph_def,
+ input_arrays,
+ output_arrays,
+ config,
+ graph=None):
+ """Apply standard TensorFlow optimizations to the graph_def.
+
+ Args:
+ graph_def: Frozen GraphDef to be optimized.
+ input_arrays: List of arrays that are considered inputs of the graph.
+ output_arrays: List of arrays that are considered outputs of the graph.
+ config: tf.ConfigProto.
+ graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)
+
+ Returns:
+ A new, optimized GraphDef.
+ """
+ meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
+
+ signature = _meta_graph_pb2.SignatureDef()
+ for array in input_arrays:
+ signature.inputs[array.name].name = array.name
+ signature.inputs[array.name].dtype = array.dtype.as_datatype_enum
+ signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
+
+ for array in output_arrays:
+ signature.outputs[array.name].name = array.name
+ signature.outputs[array.name].dtype = array.dtype.as_datatype_enum
+ signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
+
+ meta_graph.signature_def["not_used_key"].CopyFrom(signature)
+
+ # We need to add a collection called 'train_op' so that grappler
+ # knows what the outputs are.
+ fetch_collection = _meta_graph_pb2.CollectionDef()
+ for array in input_arrays + output_arrays:
+ fetch_collection.node_list.value.append(array.name)
+ meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
+
+ return tf_optimizer.OptimizeGraph(config, meta_graph)
+
+
+def _convert_op_hints_if_present(sess, graph_def, output_tensors,
+ hinted_outputs_nodes):
+ if is_frozen_graph(sess):
+ raise ValueError("Try to convert op hints, needs unfrozen graph.")
+ output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]
+ graph_def = _convert_to_constants.convert_variables_to_constants(
+ sess, graph_def, output_arrays + hinted_outputs_nodes)
+ graph_def = convert_op_hints_to_stubs(graph_def=graph_def)
+ return graph_def
+
+
+def freeze_graph(sess, input_tensors, output_tensors):
+ """Returns a frozen GraphDef.
+
+ Runs a Grappler pass to inline functions and then freezes the graph by
+ converting its Variables to constants. If the graph is already frozen, the
+ existing session GraphDef is returned unchanged.
+ If OpHints are present, it will try to convert the OpHint graph.
+
+ Args:
+ sess: TensorFlow Session.
+ input_tensors: List of input tensors.
+ output_tensors: List of output tensors (only .name is used from this).
+
+ Returns:
+ Frozen GraphDef.
+ """
+ # Runs a Grappler pass in order to inline any functions in the graph.
+ # Aside from inlining simple functions, Grappler will also try to lower
+ # while loops into the switch/merge representation, which is undesired for
+ # OpHints, so we simply remove those attributes to prevent Grappler from
+ # doing so.
+ graph_def = _convert_to_constants.disable_lower_using_switch_merge(
+ sess.graph_def)
+ config = get_grappler_config(["function"])
+ graph_def = run_graph_optimizations(
+ graph_def, input_tensors, output_tensors, config, graph=sess.graph)
+
+ # If ophints are present, just convert them.
+ hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
+ if hinted_outputs_nodes:
+ return _convert_op_hints_if_present(sess, graph_def, output_tensors,
+ hinted_outputs_nodes)
+
+ if not is_frozen_graph(sess):
+ output_node_names = [tensor.name.split(":")[0] for tensor in output_tensors]
+ return _convert_to_constants.convert_variables_to_constants(
+ sess, graph_def, output_node_names
+ )
+ else:
+ return sess.graph_def
+
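+# --- Illustrative usage (editor's sketch, not part of the original file) ---
+# Freezing a TF1-style model, assuming `sess`, `inputs` and `outputs` come
+# from a tf.compat.v1 graph:
+#
+#   frozen_graph_def = freeze_graph(sess, inputs, outputs)
+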
+
+def is_frozen_graph(sess):
+ """Determines if the graph is frozen.
+
+ Determines if a graph has previously been frozen by checking for any
+ operations of type Variable*. If variables are found, the graph is not frozen.
+
+ Args:
+ sess: TensorFlow Session.
+
+ Returns:
+ Bool.
+ """
+ for op in sess.graph.get_operations():
+ if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
+ return False
+ return True
+
+
+def build_debug_info_func(original_graph):
+ """Returns a method to retrieve the `GraphDebugInfo` from the original graph.
+
+ Args:
+ original_graph: The original `Graph` containing all the op stack traces.
+
+ Returns:
+ A function which retrieves the stack traces from the original graph and
+ converts them to a `GraphDebugInfo` for a given set of nodes.
+ """
+
+ def f(original_nodes):
+ """Function to create `GraphDebugInfo` for the given `original_nodes`."""
+ if not original_graph:
+ return None
+ # For the given nodes, gets all the op definitions in the original graph.
+ useful_ops = []
+ for func, name in original_nodes:
+ try:
+ if not func:
+ useful_ops.append((func, original_graph.get_operation_by_name(name)))
+ else:
+ sub_func = original_graph._get_function(func) # pylint: disable=protected-access
+ if isinstance(sub_func, function.AtomicFunction): # pylint: disable=protected-access
+ useful_ops.append(
+ (func, sub_func.graph.get_operation_by_name(name)))
+ else:
+ sys.stderr.write(
+ "Use '@tf.function' or '@defun' to decorate the function.\n")
+ continue
+ except KeyError:
+ # New node created by graph optimizer. No stack trace from source code.
+ continue
+ # Convert all the op definitions to stack traces in terms of GraphDebugInfo.
+ return _error_interpolation.create_graph_debug_info_def(useful_ops)
+
+ return f
+
+
+def convert_debug_info_func(saved_debug_info):
+ """Returns a method to retrieve the `GraphDebugInfo` from the original graph.
+
+ Args:
+ saved_debug_info: The `GraphDebugInfo` containing all the debug info.
+
+ Returns:
+ A function which returns the saved `GraphDebugInfo` for any given set of
+ nodes, without consulting the original graph.
+ """
+
+ def f(original_nodes):
+ """Function to create `GraphDebugInfo` for the given `original_nodes`."""
+ del original_nodes
+ return saved_debug_info
+
+ return f
+
+
+def get_debug_info(nodes_to_debug_info_func, converted_graph):
+ """Returns the debug info for the original nodes in the `converted_graph`.
+
+ Args:
+ nodes_to_debug_info_func: The method to collect the op debug info for the
+ nodes.
+ converted_graph: A `GraphDef` after optimization and transformation.
+
+ Returns:
+ `GraphDebugInfo` for all the original nodes in `converted_graph`.
+ """
+ if not nodes_to_debug_info_func:
+ return None
+
+ # Collect all the debug info nodes from the converted_graph
+ original_nodes = set()
+ for node in converted_graph.node:
+ debug_nodes = node.experimental_debug_info.original_node_names
+ debug_funcs = node.experimental_debug_info.original_func_names
+ # If the `original_node_names` are empty, uses the node name directly.
+ if not debug_nodes:
+ original_nodes.add(("", node.name))
+ else:
+ for i in range(len(debug_nodes)):
+ debug_func = "" if i >= len(debug_funcs) else debug_funcs[i]
+ original_nodes.add((debug_func, debug_nodes[i]))
+
+ # Convert the nodes to the debug info proto object.
+ return nodes_to_debug_info_func(original_nodes)
+
+
+def convert_bytes_to_c_source(data,
+ array_name,
+ max_line_width=80,
+ include_guard=None,
+ include_path=None,
+ use_tensorflow_license=False):
+ """Returns strings representing a C constant array containing `data`.
+
+ Args:
+ data: Byte array that will be converted into a C constant.
+ array_name: String to use as the variable name for the constant array.
+ max_line_width: The longest line length, for formatting purposes.
+ include_guard: Name to use for the include guard macro definition.
+ include_path: Optional path to include in the source file.
+ use_tensorflow_license: Whether to include the standard TensorFlow Apache2
+ license in the generated files.
+
+ Returns:
+ Text that can be compiled as a C source file to link in the data as a
+ literal array of values.
+ Text that can be used as a C header file to reference the literal array.
+ """
+
+ starting_pad = " "
+ array_lines = []
+ array_line = starting_pad
+ for value in bytearray(data):
+ if (len(array_line) + 4) > max_line_width:
+ array_lines.append(array_line + "\n")
+ array_line = starting_pad
+ array_line += " 0x%02x," % (value,)
+ if len(array_line) > len(starting_pad):
+ array_lines.append(array_line + "\n")
+ array_values = "".join(array_lines)
+
+ if include_guard is None:
+ include_guard = "TENSORFLOW_LITE_UTIL_" + array_name.upper() + "_DATA_H_"
+
+ if include_path is not None:
+ include_line = "#include \"{include_path}\"\n".format(
+ include_path=include_path)
+ else:
+ include_line = ""
+
+ if use_tensorflow_license:
+ license_text = """
+/* Copyright {year} The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+""".format(year=datetime.date.today().year)
+ else:
+ license_text = ""
+
+ source_template = """{license_text}
+// This is a TensorFlow Lite model file that has been converted into a C data
+// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
+// This form is useful for compiling into a binary for devices that don't have a
+// file system.
+
+{include_line}
+// We need to keep the data array aligned on some architectures.
+#ifdef __has_attribute
+#define HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define HAVE_ATTRIBUTE(x) 0
+#endif
+#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))
+#else
+#define DATA_ALIGN_ATTRIBUTE
+#endif
+
+const unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{
+{array_values}}};
+const int {array_name}_len = {array_length};
+"""
+
+ source_text = source_template.format(
+ array_name=array_name,
+ array_length=len(data),
+ array_values=array_values,
+ license_text=license_text,
+ include_line=include_line)
+
+ header_template = """
+{license_text}
+
+// This is a TensorFlow Lite model file that has been converted into a C data
+// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
+// This form is useful for compiling into a binary for devices that don't have a
+// file system.
+
+#ifndef {include_guard}
+#define {include_guard}
+
+extern const unsigned char {array_name}[];
+extern const int {array_name}_len;
+
+#endif // {include_guard}
+"""
+
+ header_text = header_template.format(
+ array_name=array_name,
+ include_guard=include_guard,
+ license_text=license_text)
+
+ return source_text, header_text
+
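+
+# Example (illustrative): embedding a model as C source and writing it out.
+# The file names and the `model_bytes` variable are hypothetical.
+#
+#   source, header = convert_bytes_to_c_source(
+#       data=model_bytes, array_name='g_model', include_path='model_data.h')
+#   with open('model_data.cc', 'w') as f:
+#     f.write(source)
+#   with open('model_data.h', 'w') as f:
+#     f.write(header)
+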
+
+def _convert_model_from_bytearray_to_object(model_bytearray):
+ """Converts a tflite model from a bytearray into a parsable object."""
+ model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
+ model_object = schema_fb.ModelT.InitFromObj(model_object)
+ model_object = copy.deepcopy(model_object)
+ return model_object
+
+
+def _convert_model_from_object_to_bytearray(model_object):
+ """Converts a tflite model from a parsable object into a bytearray."""
+ # Initial size of the buffer, which will grow automatically if needed
+ builder = flatbuffers.Builder(1024)
+ model_offset = model_object.Pack(builder)
+ builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)
+ return bytes(builder.Output())
+
+
+def get_quantize_opcode_idx(model):
+ """Returns the quantize op idx."""
+ quant_opcode_idxs = []
+ for idx, opcode in enumerate(model.operatorCodes):
+ builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)
+ if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:
+ quant_opcode_idxs.append(idx)
+ return quant_opcode_idxs
+
+
+def get_dequantize_opcode_idx(model):
+ """Returns the quantize op idx."""
+ quant_opcode_idxs = []
+ for idx, opcode in enumerate(model.operatorCodes):
+ builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)
+ if builtin_code == schema_fb.BuiltinOperator.DEQUANTIZE:
+ quant_opcode_idxs.append(idx)
+ return quant_opcode_idxs
+
+
+def _update_signature_def_tensors(tensor_maps, map_old_to_new_tensors):
+ """Update the tensors in the SignatureDef's TensorMaps."""
+ for i in range(len(tensor_maps)):
+ if tensor_maps[i].tensorIndex in map_old_to_new_tensors:
+ tensor_maps[i].tensorIndex = (
+ map_old_to_new_tensors[tensor_maps[i].tensorIndex])
+
+
+def _remove_tensors_from_model(model, remove_tensors_idxs):
+ """Remove tensors from model."""
+ if not remove_tensors_idxs:
+ return
+ if len(model.subgraphs) > 1:
+ logging.info("Skipping the removal of dangled tensors since the model has "
+ "multiple subgraphs and tensors can be used in the different "
+ "subgraph(s)")
+ return
+ subgraph = model.subgraphs[0]
+ tensors = subgraph.tensors
+ operators = subgraph.operators
+
+ logging.debug("Removing tensors at indices : %s", remove_tensors_idxs)
+ # An optimized check to validate if "remove_tensors_idxs" (e.g. [4,5,6]) is
+ # a contiguous tail of the "tensors" indices (e.g. [0,1,2,3,4,5,6]).
+ if min(remove_tensors_idxs) == len(tensors) - len(remove_tensors_idxs):
+ logging.debug("Removing tensors only at the end of the tensor list")
+ del tensors[min(remove_tensors_idxs):]
+ else:
+ logging.debug("Removing tensors requires updating the model")
+ # Map the old tensor indices to new tensor indices
+ d_old_to_new_tensors = {}
+ left_shift_by = 0
+ for idx in range(len(tensors)):
+ if idx in remove_tensors_idxs:
+ left_shift_by += 1
+ else:
+ d_old_to_new_tensors[idx] = idx - left_shift_by
+ logging.debug("Old to new tensors map: %s", d_old_to_new_tensors.__str__())
+ # Update tensor indices referenced throughout the model
+ def update_tensors(tensor_idxs):
+ for i, ti in enumerate(tensor_idxs):
+ tensor_idxs[i] = d_old_to_new_tensors.get(ti, -1)
+ update_tensors(subgraph.inputs)
+ update_tensors(subgraph.outputs)
+ for op in operators:
+ update_tensors(op.inputs)
+ update_tensors(op.outputs)
+ if model.signatureDefs:
+ signature_def = model.signatureDefs[0]
+ _update_signature_def_tensors(signature_def.inputs, d_old_to_new_tensors)
+ _update_signature_def_tensors(signature_def.outputs, d_old_to_new_tensors)
+ # Delete the tensors
+ for idx in sorted(remove_tensors_idxs, reverse=True):
+ tensors.pop(idx)
+ logging.debug("Removed tensors marked for deletion")
+
+
+def _modify_model_input_type(model, inference_input_type=dtypes.float32):
+ """Modify model input type."""
+ if inference_input_type == dtypes.float32:
+ return
+
+ if not model.signatureDefs:
+ _modify_model_input_type_per_subgraph(model, 0, -1, inference_input_type)
+ return
+
+ for signature_index, signature_def in enumerate(model.signatureDefs):
+ _modify_model_input_type_per_subgraph(model, signature_def.subgraphIndex,
+ signature_index, inference_input_type)
+
+
+def _modify_model_input_type_per_subgraph(model, subgraph_index,
+ signature_index,
+ inference_input_type):
+ """Modify model input type per subgraph."""
+ subgraph = model.subgraphs[subgraph_index]
+ tensors = subgraph.tensors
+ operators = subgraph.operators
+
+ # Find all quantize operators
+ quant_opcode_idxs = get_quantize_opcode_idx(model)
+ if operators and not quant_opcode_idxs:
+ for input_idx in subgraph.inputs:
+ input_type = _convert_tflite_enum_type_to_tf_type(tensors[input_idx].type)
+ if input_type == dtypes.float32:
+ raise ValueError("Model input is not dequantized.")
+ # None of the inputs are float32, so they must be int16, int8, or bool
+ return
+
+ # Validate that the model input is quantized
+ input_quant_ops = []
+ for op in operators:
+ # Find operators that quantize model input
+ if op.opcodeIndex in quant_opcode_idxs and op.inputs[0] in subgraph.inputs:
+ float_tensor, quant_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
+ # If found, validate that the operator's input type is float
+ float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
+ if float_type != dtypes.float32:
+ if float_type == inference_input_type:
+ continue
+ else:
+ raise ValueError(
+ "Initial model input type must be tf.float32. Expected type for "
+ "tensor with name '{}' is tf.float32, instead type is {}".format(
+ float_tensor.name, get_tf_type_name(float_type)))
+ # If found, validate that the operator output is quantized and compatible
+ # with the final model input type
+ quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
+ if quant_type not in _MAP_QUANT_TO_IO_TYPES:
+ raise ValueError(
+ "Initial model input is not quantized. Expected type for "
+ "tensor with name '{}' should be in {}, instead type is {}".format(
+ quant_tensor.name,
+ tuple(get_tf_type_name(t) for t in
+ _MAP_QUANT_TO_IO_TYPES.keys()),
+ get_tf_type_name(quant_type)))
+ else:
+ inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]
+ if inference_input_type not in inference_io_types:
+ raise ValueError(
+ "Unsupported `inference_input_type` value. Expected to be in "
+ "{}, instead got {}.".format(
+ tuple(get_tf_type_name(t) for t in inference_io_types),
+ get_tf_type_name(inference_input_type)))
+ input_quant_ops.append(op)
+
+ if len(subgraph.inputs) != len(input_quant_ops):
+ logging.warning(
+ "For model inputs containing unsupported operations which cannot be "
+ "quantized, the `inference_input_type` attribute will default to the "
+ "original type."
+ )
+
+ # Modify model input type
+ if inference_input_type == dtypes.uint8:
+ # Change quant op (float to int8) to quant op (uint8 to int8)
+ for op in input_quant_ops:
+ int8_quantization = tensors[op.outputs[0]].quantization
+ uint8_quantization = schema_fb.QuantizationParametersT()
+ uint8_quantization.scale = [int8_quantization.scale[0]]
+ uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
+ tensors[op.inputs[0]].quantization = uint8_quantization
+ tensors[op.inputs[0]].type = schema_fb.TensorType.UINT8
+ elif inference_input_type in _MAP_QUANT_TO_IO_TYPES:
+ # Remove the inputs and the quant operator
+ remove_tensors_idxs = set()
+ for op in input_quant_ops:
+ subgraph.inputs[subgraph.inputs == op.inputs[0]] = op.outputs[0]
+ if signature_index >= 0:
+ signature_def = model.signatureDefs[signature_index]
+ for i in range(len(signature_def.inputs)):
+ if signature_def.inputs[i].tensorIndex == op.inputs[0]:
+ signature_def.inputs[i].tensorIndex = op.outputs[0]
+ remove_tensors_idxs.add(op.inputs[0])
+ operators.remove(op)
+ # Remove tensors marked for deletion.
+ _remove_tensors_from_model(model, remove_tensors_idxs)
+ else:
+ raise ValueError(
+ "Unsupported `inference_input_type` value {}.".format(
+ get_tf_type_name(inference_input_type)))
+
+
+def _modify_model_output_type(model, inference_output_type=dtypes.float32):
+ """Modify model output type."""
+ if inference_output_type == dtypes.float32:
+ return
+
+ if not model.signatureDefs:
+ _modify_model_output_type_per_subgraph(model, 0, -1, inference_output_type)
+ return
+
+ for signature_index, signature_def in enumerate(model.signatureDefs):
+ _modify_model_output_type_per_subgraph(model, signature_def.subgraphIndex,
+ signature_index,
+ inference_output_type)
+
+
+def _modify_model_output_type_per_subgraph(model, subgraph_index,
+ signature_index,
+ inference_output_type):
+ """Modify model output type per subgraph."""
+ subgraph = model.subgraphs[subgraph_index]
+ tensors = subgraph.tensors
+ operators = subgraph.operators
+
+ # Find all dequantize operators
+ dequant_opcode_idxs = get_dequantize_opcode_idx(model)
+ if operators and not dequant_opcode_idxs:
+ for output in subgraph.outputs:
+ output_type = _convert_tflite_enum_type_to_tf_type(tensors[output].type)
+ if output_type == dtypes.float32:
+ raise ValueError("Model output is not dequantized.")
+ # None of the outputs are float32, so they must be int16, int8, or bool
+ return
+
+ # Validate that the model output is dequantized
+ output_dequant_ops = []
+ for op in operators:
+ # Find operators that dequantize model output
+ if (op.opcodeIndex in dequant_opcode_idxs and
+ op.outputs[0] in subgraph.outputs):
+ # If found, validate that the operator's output type is float
+ quant_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
+ float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
+ if float_type != dtypes.float32:
+ if float_type == inference_output_type:
+ continue
+ else:
+ raise ValueError(
+ "Initial model output type must be tf.float32. Expected type for "
+ "tensor with name '{}' is tf.float32, instead type is {}".format(
+ float_tensor.name, get_tf_type_name(float_type)))
+ # If found, validate that the operator input is quantized and compatible
+ # with the final model output type
+ quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
+ if quant_type not in _MAP_QUANT_TO_IO_TYPES:
+ raise ValueError(
+ "Initial model output is not dequantized. Expected type for "
+ "tensor with name '{}' should be in {}, instead type is {}".format(
+ quant_tensor.name,
+ tuple(get_tf_type_name(t) for t in
+ _MAP_QUANT_TO_IO_TYPES.keys()),
+ get_tf_type_name(quant_type)))
+ else:
+ inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]
+ if inference_output_type not in inference_io_types:
+ raise ValueError(
+ "Unsupported `inference_output_type` value. Expected to be in "
+ "{}, instead got {}.".format(
+ tuple(get_tf_type_name(t) for t in inference_io_types),
+ get_tf_type_name(inference_output_type)))
+ output_dequant_ops.append(op)
+
+ if len(subgraph.outputs) != len(output_dequant_ops):
+ logging.warning(
+ "For model outputs containing unsupported operations which cannot be "
+ "quantized, the `inference_output_type` attribute will default to the "
+ "original type."
+ )
+
+ # Modify model output type
+ if inference_output_type == dtypes.uint8:
+ # Find a quantize operator
+ quant_opcode_idx = -1
+ for idx, opcode in enumerate(model.operatorCodes):
+ builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)
+ if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:
+ quant_opcode_idx = idx
+ break
+ # Create a quantize operator, if none exist
+ if quant_opcode_idx == -1:
+ quant_op = schema_fb.OperatorCodeT()
+ quant_op.builtinCode = schema_fb.BuiltinOperator.QUANTIZE
+ quant_op.deprecatedBuiltinCode = schema_fb.BuiltinOperator.QUANTIZE
+ model.operatorCodes.append(quant_op)
+ quant_opcode_idx = len(model.operatorCodes) - 1
+ # Change dequant op (int8 to float) to quant op (int8 to uint8)
+ for op in output_dequant_ops:
+ op.opcodeIndex = quant_opcode_idx
+ int8_quantization = tensors[op.inputs[0]].quantization
+ uint8_quantization = schema_fb.QuantizationParametersT()
+ uint8_quantization.scale = [int8_quantization.scale[0]]
+ uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
+ tensors[op.outputs[0]].quantization = uint8_quantization
+ tensors[op.outputs[0]].type = schema_fb.TensorType.UINT8
+ elif inference_output_type in _MAP_QUANT_TO_IO_TYPES:
+ # Remove the outputs and the dequant operator
+ remove_tensors_idxs = set()
+ for op in output_dequant_ops:
+ subgraph.outputs[subgraph.outputs == op.outputs[0]] = op.inputs[0]
+ if signature_index >= 0:
+ signature_def = model.signatureDefs[signature_index]
+ for i in range(len(signature_def.outputs)):
+ if signature_def.outputs[i].tensorIndex == op.outputs[0]:
+ signature_def.outputs[i].tensorIndex = op.inputs[0]
+ remove_tensors_idxs.add(op.outputs[0])
+ operators.remove(op)
+ # Remove tensors marked for deletion.
+ _remove_tensors_from_model(model, remove_tensors_idxs)
+ else:
+ raise ValueError(
+ "Unsupported `inference_output_type` value {}.".format(
+ get_tf_type_name(inference_output_type)))
+
+
+def _remove_redundant_quantize_ops(model):
+ """Finds back to back quantize ops and remove the first quantize op."""
+ if not model.signatureDefs:
+ _remove_redundant_quantize_ops_per_subgraph(model, 0, -1)
+ return
+
+ for signature_index, signature_def in enumerate(model.signatureDefs):
+ _remove_redundant_quantize_ops_per_subgraph(model,
+ signature_def.subgraphIndex,
+ signature_index)
+
+
+def _remove_redundant_quantize_ops_per_subgraph(model, subgraph_index,
+ signature_index):
+ """Remove redundant quantize ops per subgraph."""
+ subgraph = model.subgraphs[subgraph_index]
+ tensors = subgraph.tensors
+ operators = subgraph.operators
+
+ # Find all quantize operators.
+ quant_opcode_idxs = get_quantize_opcode_idx(model)
+ dequant_opcode_idxs = get_dequantize_opcode_idx(model)
+
+ # Find all redundant quant tensors.
+ all_quant_ops = []
+ redundant_quant_tensors = {}
+ output_dequant_tensors = {}
+ for op in operators:
+ if op.opcodeIndex in quant_opcode_idxs:
+ all_quant_ops.append(op)
+ input_tensor = tensors[op.inputs[0]]
+ output_tensor = tensors[op.outputs[0]]
+ input_type = _convert_tflite_enum_type_to_tf_type(input_tensor.type)
+ output_type = _convert_tflite_enum_type_to_tf_type(output_tensor.type)
+ # This is a requantize op, so write down its input tensor index.
+ if input_type != dtypes.float32 and output_type != dtypes.float32:
+ redundant_quant_tensors[op.inputs[0]] = op
+ if (op.opcodeIndex in dequant_opcode_idxs and
+ op.outputs[0] in subgraph.outputs):
+ output_dequant_tensors[op.inputs[0]] = op
+
+ # Remove all the quant ops which produce the redundant quant tensors.
+ for op in all_quant_ops:
+ output_tensor_idx = op.outputs[0]
+ if output_tensor_idx in redundant_quant_tensors:
+ requantize_op = redundant_quant_tensors[output_tensor_idx]
+ if model.signatureDefs:
+ signature_def = model.signatureDefs[0]
+ for output in signature_def.outputs:
+ if output.tensorIndex == op.outputs[0]:
+ output.tensorIndex = op.inputs[0]
+ deleted_tensor = requantize_op.inputs[0]
+ # Reset the input of the requantize op to the float input
+ requantize_op.inputs[0] = op.inputs[0]
+ # Migrate other operator users to output tensor of requantize op
+ for op_user in operators:
+ if deleted_tensor in op_user.inputs and op_user != requantize_op:
+ for idx, input_tensor in enumerate(op_user.inputs):
+ if input_tensor == deleted_tensor:
+ op_user.inputs[idx] = requantize_op.outputs[0]
+ operators.remove(op)
+
+ # Remove all the quant ops which connect to the output dequant op.
+ for op in all_quant_ops:
+ output_tensor_idx = op.outputs[0]
+ if output_tensor_idx in output_dequant_tensors:
+ dequant_op = output_dequant_tensors[output_tensor_idx]
+ subgraph.outputs[subgraph.outputs == dequant_op.outputs[0]] = op.inputs[0]
+ if signature_index >= 0:
+ signature_def = model.signatureDefs[signature_index]
+ for output in signature_def.outputs:
+ if output.tensorIndex == dequant_op.outputs[0]:
+ output.tensorIndex = op.inputs[0]
+ operators.remove(op)
+ operators.remove(dequant_op)
+
+
+def modify_model_io_type(
+ model, inference_input_type=dtypes.float32,
+ inference_output_type=dtypes.float32):
+ """Modify the input/output type of a tflite model.
+
+ Args:
+ model: A tflite model.
+ inference_input_type: tf.DType representing the modified input type.
+ (default tf.float32. If the model input is int8 quantized, it must be in
+ {tf.float32, tf.int8, tf.uint8}; if the model input is int16 quantized,
+ it must be in {tf.float32, tf.int16}; otherwise it must be tf.float32.)
+ inference_output_type: tf.DType representing the modified output type.
+ (default tf.float32. If the model output is int8 dequantized, it must be
+ in {tf.float32, tf.int8, tf.uint8}; if the model output is int16
+ dequantized, it must be in {tf.float32, tf.int16}; otherwise it must be
+ tf.float32.)
+
+ Returns:
+ A tflite model with modified input/output type.
+
+ Raises:
+ ValueError: If `inference_input_type`/`inference_output_type` is unsupported
+ or a supported integer type is specified for a model whose input/output is
+ not quantized/dequantized.
+ RuntimeError: If the modification was unsuccessful.
+
+ """
+ if (inference_input_type == dtypes.float32 and
+ inference_output_type == dtypes.float32):
+ return model
+
+ model_object = _convert_model_from_bytearray_to_object(model)
+
+ _modify_model_input_type(model_object, inference_input_type)
+
+ _modify_model_output_type(model_object, inference_output_type)
+
+ _remove_redundant_quantize_ops(model_object)
+
+ return _convert_model_from_object_to_bytearray(model_object)
+
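+
+# Example (illustrative): converting a fully-quantized int8 model so that its
+# interface uses uint8, via this module's `dtypes` import. `tflite_model`
+# stands for flatbuffer bytes produced by the converter.
+#
+#   new_model = modify_model_io_type(
+#       tflite_model,
+#       inference_input_type=dtypes.uint8,
+#       inference_output_type=dtypes.uint8)
+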
+
+def get_sparsity_modes(model_object):
+ """Get sparsity modes used in a tflite model.
+
+ The sparsity modes are listed in the conversion_metadata.fbs file.
+
+ Args:
+ model_object: A tflite model in object form.
+
+ Returns:
+ The list of sparsity modes used in the model.
+ """
+ if not model_object or not model_object.metadata:
+ return []
+
+ result = set()
+ for subgraph in model_object.subgraphs:
+ for tensor in subgraph.tensors:
+ if not tensor.sparsity:
+ continue
+
+ # Block map is the list of indexes where the block size is larger than 1.
+ # So an empty block map means random sparsity.
+ if not tensor.sparsity.blockMap:
+ result.add(
+ conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY)
+ else:
+ result.add(
+ conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY)
+
+ return list(result)
+
+
+def get_model_hash(model):
+ """Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure.
+
+ Args:
+ model: A TensorFlow Lite model object.
+
+ Returns:
+ int: A 64-bit integer hash value representing the model structure.
+ """
+ # TODO(b/344872922): Move the hashing implementation to C++ layer since not
+ # all calls to the converter come via the Python API.
+ hash_value = 0
+
+ for subgraph in model.subgraphs:
+ if subgraph.operators is not None:
+ hash_value = update_hash_with_primitive_value(
+ hash_value, len(subgraph.operators)
+ )
+
+ for operator in subgraph.operators:
+ if operator.inputs is not None:
+ hash_value = update_hash_with_array(hash_value, operator.inputs)
+
+ if operator.outputs is not None:
+ hash_value = update_hash_with_array(hash_value, operator.outputs)
+
+ if subgraph.tensors is not None:
+ hash_value = update_hash_with_primitive_value(
+ hash_value, len(subgraph.tensors)
+ )
+
+ for tensor in subgraph.tensors:
+ if tensor.buffer is not None:
+ buffer = model.buffers[tensor.buffer]
+ if buffer.data is not None:
+ hash_value = update_hash_with_primitive_value(
+ hash_value, len(buffer.data)
+ )
+
+ if tensor.shape is not None:
+ hash_value = update_hash_with_array(hash_value, tensor.shape)
+
+ if subgraph.inputs is not None:
+ hash_value = update_hash_with_primitive_value(
+ hash_value, len(subgraph.inputs)
+ )
+
+ if subgraph.outputs is not None:
+ hash_value = update_hash_with_primitive_value(
+ hash_value, len(subgraph.outputs)
+ )
+
+ return hash_value
+
+
+def update_hash_with_primitive_value(hash_value, value):
+ """Update the hash value using a primitive value.
+
+ Args:
+ hash_value (uint64): The current hash value.
+ value: The primitive value to incorporate into the hash.
+
+ Returns:
+ int: The updated hash value.
+ """
+ hash_const = np.uint64(0x9E3779B97F4A7800)
+ hash_value = np.uint64(hash_value)
+ value = np.uint64(value)
+
+ # Convert to arrays before shifting.
+ hash_value = np.array([hash_value])
+ value = np.array([value])
+
+ # Shift the values, then take the value from the first index.
+ hash_value = np.bitwise_xor(
+ hash_value,
+ (
+ value
+ + hash_const
+ + np.left_shift(hash_value, 10)
+ + np.right_shift(hash_value, 4)
+ ),
+ )[0]
+
+ return hash_value
+
+
+def update_hash_with_array(hash_value, int_array):
+ """Update the hash value using a TFLite int array.
+
+ Args:
+ hash_value (int): The current hash value.
+ int_array: A TFLite int array to incorporate into the hash.
+
+ Returns:
+ int: The updated hash value.
+ """
+ if int_array is not None:
+ for i in int_array:
+ hash_value = update_hash_with_primitive_value(hash_value, i)
+ return hash_value
+
+
+def populate_conversion_metadata(model_object, metadata):
+ """Add or update conversion metadata to a tflite model.
+
+ Args:
+ model_object: A tflite model in object form.
+ metadata: The conversion metadata.
+
+ Returns:
+ A tflite model object with embedded conversion metadata.
+ """
+ try:
+ metadata_builder = flatbuffers.Builder(0)
+ metadata_builder.Finish(metadata.Pack(metadata_builder))
+ buffer_field = schema_fb.BufferT()
+ buffer_field.data = metadata_builder.Output()
+
+ if not model_object.metadata:
+ model_object.metadata = []
+ else:
+ # Check if metadata has already been populated.
+ for meta in model_object.metadata:
+ if meta.name.decode("utf-8") == CONVERSION_METADATA_FIELD_NAME:
+ model_object.buffers[meta.buffer] = buffer_field
+ return model_object
+
+ if not model_object.buffers:
+ model_object.buffers = []
+ model_object.buffers.append(buffer_field)
+ # Creates a new metadata field.
+ metadata_field = schema_fb.MetadataT()
+ metadata_field.name = CONVERSION_METADATA_FIELD_NAME
+ metadata_field.buffer = len(model_object.buffers) - 1
+ model_object.metadata.append(metadata_field)
+
+ return model_object
+ except Exception: # pylint: disable=broad-except
+ return model_object
+
+
+def get_conversion_metadata(model_buffer):
+ """Read conversion metadata from a tflite model.
+
+ Args:
+ model_buffer: A tflite model.
+
+ Returns:
+ The conversion metadata or None if it is not populated.
+ """
+ model_object = flatbuffer_utils.convert_bytearray_to_object(model_buffer)
+ if not model_object or not model_object.metadata:
+ return None
+
+ for meta in model_object.metadata:
+ if meta.name.decode("utf-8") == CONVERSION_METADATA_FIELD_NAME:
+ metadata_buf = model_object.buffers[meta.buffer].data.tobytes()
+ return conversion_metadata_fb.ConversionMetadataT.InitFromObj(
+ conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata(
+ metadata_buf, 0
+ )
+ )
+
+ return None
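+
+
+# Example (illustrative): reading back conversion metadata. `model_bytes` is
+# assumed to be a converted TFLite flatbuffer.
+#
+#   metadata = get_conversion_metadata(model_bytes)
+#   if metadata is not None:
+#     print(metadata)  # fields are defined in conversion_metadata.fbs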
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..683f451db1a10d6b3301f7e2cbac7af3c2bb83cd
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca3d420a8499ec614678b732fdfde8c39016f708
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a33c0c8844d2111b2d4e63564c2770a39fd6d622
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad90f04b01d892c7ae825a3620dbd0dd185ba884
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py
@@ -0,0 +1,455 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utility functions for FlatBuffers.
+
+All functions that are commonly used to work with FlatBuffers.
+
+Refer to the tensorflow lite flatbuffer schema here:
+tensorflow/lite/schema/schema.fbs
+"""
+
+import copy
+import random
+import re
+import struct
+import sys
+
+import flatbuffers
+
+from tensorflow.lite.python import schema_py_generated as schema_fb
+from tensorflow.lite.python import schema_util
+from tensorflow.python.platform import gfile
+
+_TFLITE_FILE_IDENTIFIER = b'TFL3'
+
+
+def convert_bytearray_to_object(model_bytearray):
+ """Converts a tflite model from a bytearray to an object for parsing."""
+ model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
+ return schema_fb.ModelT.InitFromObj(model_object)
+
+
+def read_model(input_tflite_file):
+ """Reads a tflite model as a python object.
+
+ Args:
+ input_tflite_file: Full path name to the input tflite file
+
+ Raises:
+ RuntimeError: If input_tflite_file path is invalid.
+ IOError: If input_tflite_file cannot be opened.
+
+ Returns:
+ A python object corresponding to the input tflite file.
+ """
+ if not gfile.Exists(input_tflite_file):
+ raise RuntimeError('Input file not found at %r\n' % input_tflite_file)
+ with gfile.GFile(input_tflite_file, 'rb') as input_file_handle:
+ model_bytearray = bytearray(input_file_handle.read())
+ return read_model_from_bytearray(model_bytearray)
+
+
+def read_model_from_bytearray(model_bytearray):
+ """Reads a tflite model as a python object.
+
+ Args:
+ model_bytearray: TFLite model in bytearray format.
+
+ Returns:
+ A python object corresponding to the input tflite file.
+ """
+ model = convert_bytearray_to_object(model_bytearray)
+ if sys.byteorder == 'big':
+ byte_swap_tflite_model_obj(model, 'little', 'big')
+
+ # Offset handling for models > 2GB
+ for buffer in model.buffers:
+ if buffer.offset:
+ buffer.data = model_bytearray[buffer.offset : buffer.offset + buffer.size]
+ buffer.offset = 0
+ buffer.size = 0
+ for subgraph in model.subgraphs:
+ for op in subgraph.operators:
+ if op.largeCustomOptionsOffset:
+ op.customOptions = model_bytearray[
+ op.largeCustomOptionsOffset : op.largeCustomOptionsOffset
+ + op.largeCustomOptionsSize
+ ]
+ op.largeCustomOptionsOffset = 0
+ op.largeCustomOptionsSize = 0
+
+ return model
+
+
+def read_model_with_mutable_tensors(input_tflite_file):
+ """Reads a tflite model as a python object with mutable tensors.
+
+ Similar to read_model() with the addition that the returned object has
+ mutable tensors (read_model() returns an object with immutable tensors).
+
+ NOTE: This API only works for TFLite generated with
+ _experimental_use_buffer_offset=false
+
+ Args:
+ input_tflite_file: Full path name to the input tflite file
+
+ Raises:
+ RuntimeError: If input_tflite_file path is invalid.
+ IOError: If input_tflite_file cannot be opened.
+
+ Returns:
+ A mutable python object corresponding to the input tflite file.
+ """
+ return copy.deepcopy(read_model(input_tflite_file))
+
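+
+# Example (illustrative): a read/modify/write round trip with the helpers in
+# this module (file names are hypothetical):
+#
+#   from tensorflow.lite.tools import flatbuffer_utils
+#
+#   model = flatbuffer_utils.read_model_with_mutable_tensors('in.tflite')
+#   flatbuffer_utils.strip_strings(model)
+#   flatbuffer_utils.write_model(model, 'out.tflite')
+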
+
+def convert_object_to_bytearray(model_object, extra_buffer=b''):
+ """Converts a tflite model from an object to a immutable bytearray."""
+ # Initial size of the buffer, which will grow automatically if needed
+ builder = flatbuffers.Builder(1024)
+ model_offset = model_object.Pack(builder)
+ builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)
+ model_bytearray = bytes(builder.Output())
+ model_bytearray = model_bytearray + extra_buffer
+ return model_bytearray
+
+
+def write_model(model_object, output_tflite_file):
+ """Writes the tflite model, a python object, into the output file.
+
+ NOTE: This API only works for TFLite generated with
+ _experimental_use_buffer_offset=false
+
+ Args:
+ model_object: A tflite model as a python object
+ output_tflite_file: Full path name to the output tflite file.
+
+ Raises:
+ IOError: If output_tflite_file path is invalid or cannot be opened.
+ """
+ if sys.byteorder == 'big':
+ model_object = copy.deepcopy(model_object)
+ byte_swap_tflite_model_obj(model_object, 'big', 'little')
+ model_bytearray = convert_object_to_bytearray(model_object)
+ with gfile.GFile(output_tflite_file, 'wb') as output_file_handle:
+ output_file_handle.write(model_bytearray)
+
+
+def strip_strings(model):
+ """Strips all nonessential strings from the model to reduce model size.
+
+ We remove the following strings (found by searching for ":string" in the
+ tensorflow lite flatbuffer schema):
+ 1. Model description
+ 2. SubGraph name
+ 3. Tensor names
+ We retain OperatorCode custom_code and Metadata name.
+
+ Args:
+ model: The model from which to remove nonessential strings.
+ """
+
+ model.description = None
+ for subgraph in model.subgraphs:
+ subgraph.name = None
+ for tensor in subgraph.tensors:
+ tensor.name = None
+ # We clear all signature_def structure, since without names it is useless.
+ model.signatureDefs = None
+
+
+def type_to_name(tensor_type):
+ """Converts a numerical enum to a readable tensor type."""
+ for name, value in schema_fb.TensorType.__dict__.items():
+ if value == tensor_type:
+ return name
+ return None
+
+
+def randomize_weights(model, random_seed=0, buffers_to_skip=None):
+ """Randomize weights in a model.
+
+ Args:
+ model: The model in which to randomize weights.
+ random_seed: The input to the random number generator (default value is 0).
+ buffers_to_skip: The list of buffer indices to skip. The weights in these
+ buffers are left unmodified.
+ """
+
+ # Seed the random number generator (default seed is 0) for reproducibility.
+ random.seed(random_seed)
+
+ # Parse model buffers which store the model weights
+ buffers = model.buffers
+ buffer_ids = range(1, len(buffers)) # ignore index 0 as it's always None
+ if buffers_to_skip is not None:
+ buffer_ids = [idx for idx in buffer_ids if idx not in buffers_to_skip]
+
+ buffer_types = {}
+ for graph in model.subgraphs:
+ for op in graph.operators:
+ if op.inputs is None:
+ break
+ for input_idx in op.inputs:
+ tensor = graph.tensors[input_idx]
+ buffer_types[tensor.buffer] = type_to_name(tensor.type)
+
+ for i in buffer_ids:
+ buffer_i_data = buffers[i].data
+ buffer_i_size = 0 if buffer_i_data is None else buffer_i_data.size
+ if buffer_i_size == 0:
+ continue
+
+ # Raw data buffers are of type ubyte (or uint8) whose values lie in the
+ # range [0, 255]. Those ubytes (or uint8s) are the underlying
+ # representation of each datatype. For example, a bias tensor of type
+ # int32 appears as a ubyte (or uint8) buffer 4 times its length.
+ # For floats, we need to generate a valid float and then pack it into
+ # the raw bytes in place.
+ buffer_type = buffer_types.get(i, 'INT8')
+ if buffer_type.startswith('FLOAT'):
+ format_code = 'e' if buffer_type == 'FLOAT16' else 'f'
+ for offset in range(0, buffer_i_size, struct.calcsize(format_code)):
+ value = random.uniform(-0.5, 0.5) # See http://b/152324470#comment2
+ struct.pack_into(format_code, buffer_i_data, offset, value)
+ else:
+ for j in range(buffer_i_size):
+ buffer_i_data[j] = random.randint(0, 255)
+
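+
+# Example (illustrative): anonymizing weights while preserving the graph
+# structure, e.g. to share a reproduction model without the real parameters:
+#
+#   model = read_model_with_mutable_tensors('real_model.tflite')
+#   randomize_weights(model, random_seed=42)
+#   write_model(model, 'randomized_model.tflite')
+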
+
+def rename_custom_ops(model, map_custom_op_renames):
+ """Rename custom ops so they use the same naming style as builtin ops.
+
+ Args:
+ model: The input tflite model.
+ map_custom_op_renames: A mapping from old to new custom op names.
+ """
+ for op_code in model.operatorCodes:
+ if op_code.customCode:
+ op_code_str = op_code.customCode.decode('ascii')
+ if op_code_str in map_custom_op_renames:
+ op_code.customCode = map_custom_op_renames[op_code_str].encode('ascii')
+
+
+def opcode_to_name(model, op_code):
+ """Converts a TFLite op_code to the human readable name.
+
+ Args:
+ model: The input tflite model.
+ op_code: The op_code to resolve to a readable name.
+
+ Returns:
+ A string containing the human readable op name, or None if not resolvable.
+ """
+ op = model.operatorCodes[op_code]
+ code = max(op.builtinCode, op.deprecatedBuiltinCode)
+ for name, value in vars(schema_fb.BuiltinOperator).items():
+ if value == code:
+ return name
+ return None
+
+
+def xxd_output_to_bytes(input_cc_file):
+ """Converts xxd output C++ source file to bytes (immutable).
+
+ Args:
+ input_cc_file: Full path name to the C++ source file dumped by xxd
+
+ Raises:
+ RuntimeError: If input_cc_file path is invalid.
+ IOError: If input_cc_file cannot be opened.
+
+ Returns:
+ A bytearray corresponding to the input cc file array.
+ """
+ # Match hex values in the string with comma as separator
+ pattern = re.compile(r'\W*(0x[0-9a-fA-F,x ]+).*')
+
+ model_bytearray = bytearray()
+
+ with open(input_cc_file) as file_handle:
+ for line in file_handle:
+ values_match = pattern.match(line)
+
+ if values_match is None:
+ continue
+
+ # Match in the parentheses (hex array only)
+ list_text = values_match.group(1)
+
+ # Extract hex values (text) from the line
+ # e.g. 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c,
+ values_text = filter(None, list_text.split(','))
+
+ # Convert to hex
+ values = [int(x, base=16) for x in values_text]
+ model_bytearray.extend(values)
+
+ return bytes(model_bytearray)
+
+
+def xxd_output_to_object(input_cc_file):
+ """Converts xxd output C++ source file to object.
+
+ Args:
+ input_cc_file: Full path name to the C++ source file dumped by xxd
+
+ Raises:
+ RuntimeError: If input_cc_file path is invalid.
+ IOError: If input_cc_file cannot be opened.
+
+ Returns:
+ A python object corresponding to the input tflite file.
+ """
+ model_bytes = xxd_output_to_bytes(input_cc_file)
+ return convert_bytearray_to_object(model_bytes)
+
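+
+# Example (illustrative): recovering a model that was embedded in C source
+# with `xxd -i model.tflite > model_data.cc`:
+#
+#   model_bytes = xxd_output_to_bytes('model_data.cc')
+#   model = convert_bytearray_to_object(model_bytes)
+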
+
+def byte_swap_buffer_content(buffer, chunksize, from_endiness, to_endiness):
+ """Helper function for byte-swapping the buffers field."""
+ to_swap = [
+ buffer.data[i : i + chunksize]
+ for i in range(0, len(buffer.data), chunksize)
+ ]
+ buffer.data = b''.join([
+ int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness)
+ for byteswap in to_swap
+ ])
+
+
+def byte_swap_string_content(buffer, from_endiness, to_endiness):
+ """Helper function for byte-swapping the string buffer.
+
+ Args:
+ buffer: TFLite string buffer of from_endiness format.
+ from_endiness: The original endianness format of the string buffer.
+ to_endiness: The destined endianness format of the string buffer.
+ """
+ num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)
+ string_content = bytearray(buffer.data[4 * (num_of_strings + 2) :])
+ prefix_data = b''.join([
+ int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes(
+ 4, to_endiness
+ )
+ for i in range(0, (num_of_strings + 1) * 4 + 1, 4)
+ ])
+ buffer.data = prefix_data + string_content
+
+
+def byte_swap_tflite_model_obj(model, from_endiness, to_endiness):
+ """Byte swaps the buffers field in a TFLite model.
+
+ Args:
+ model: TFLite model object of from_endiness format.
+ from_endiness: The original endianness format of the buffers in model.
+ to_endiness: The destined endianness format of the buffers in model.
+ """
+ if model is None:
+ return
+ # Get all the constant buffers, byte swapping them as per their data types
+ buffer_swapped = []
+ types_of_16_bits = [
+ schema_fb.TensorType.FLOAT16,
+ schema_fb.TensorType.INT16,
+ schema_fb.TensorType.UINT16,
+ ]
+ types_of_32_bits = [
+ schema_fb.TensorType.FLOAT32,
+ schema_fb.TensorType.INT32,
+ schema_fb.TensorType.COMPLEX64,
+ schema_fb.TensorType.UINT32,
+ ]
+ types_of_64_bits = [
+ schema_fb.TensorType.INT64,
+ schema_fb.TensorType.FLOAT64,
+ schema_fb.TensorType.COMPLEX128,
+ schema_fb.TensorType.UINT64,
+ ]
+ for subgraph in model.subgraphs:
+ for tensor in subgraph.tensors:
+ if (
+ tensor.buffer > 0
+ and tensor.buffer < len(model.buffers)
+ and tensor.buffer not in buffer_swapped
+ and model.buffers[tensor.buffer].data is not None
+ ):
+ if tensor.type == schema_fb.TensorType.STRING:
+ byte_swap_string_content(
+ model.buffers[tensor.buffer], from_endiness, to_endiness
+ )
+ elif tensor.type in types_of_16_bits:
+ byte_swap_buffer_content(
+ model.buffers[tensor.buffer], 2, from_endiness, to_endiness
+ )
+ elif tensor.type in types_of_32_bits:
+ byte_swap_buffer_content(
+ model.buffers[tensor.buffer], 4, from_endiness, to_endiness
+ )
+ elif tensor.type in types_of_64_bits:
+ byte_swap_buffer_content(
+ model.buffers[tensor.buffer], 8, from_endiness, to_endiness
+ )
+ else:
+ continue
+ buffer_swapped.append(tensor.buffer)
+
+
+def byte_swap_tflite_buffer(tflite_model, from_endiness, to_endiness):
+ """Generates a new model byte array after byte swapping its buffers field.
+
+ Args:
+ tflite_model: TFLite flatbuffer in a byte array.
+ from_endiness: The original endianness format of the buffers in
+ tflite_model.
+ to_endiness: The destined endianness format of the buffers in tflite_model.
+
+ Returns:
+ TFLite flatbuffer in a byte array, after being byte swapped to to_endiness
+ format.
+ """
+ if tflite_model is None:
+ return None
+ # Load TFLite Flatbuffer byte array into an object.
+ model = convert_bytearray_to_object(tflite_model)
+
+ # Byte swapping the constant buffers as per their data types
+ byte_swap_tflite_model_obj(model, from_endiness, to_endiness)
+
+ # Return a TFLite flatbuffer as a byte array.
+ return convert_object_to_bytearray(model)
+
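+
+# Example (illustrative): preparing a little-endian .tflite flatbuffer for a
+# big-endian host:
+#
+#   big_endian_model = byte_swap_tflite_buffer(model_bytes, 'little', 'big')
+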
+
+def count_resource_variables(model):
+ """Calculates the number of unique resource variables in a model.
+
+ Args:
+ model: the input tflite model, either as bytearray or object.
+
+ Returns:
+ An integer number representing the number of unique resource variables.
+ """
+ if not isinstance(model, schema_fb.ModelT):
+ model = convert_bytearray_to_object(model)
+ unique_shared_names = set()
+ for subgraph in model.subgraphs:
+ if subgraph.operators is None:
+ continue
+ for op in subgraph.operators:
+ builtin_code = schema_util.get_builtin_code_from_operator_code(
+ model.operatorCodes[op.opcodeIndex]
+ )
+ if builtin_code == schema_fb.BuiltinOperator.VAR_HANDLE:
+ unique_shared_names.add(op.builtinOptions.sharedName)
+ return len(unique_shared_names)
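+
+
+# Example (illustrative): `count_resource_variables` accepts either raw
+# flatbuffer bytes or an already-parsed schema_fb.ModelT object:
+#
+#   num_vars = count_resource_variables(read_model('model.tflite'))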
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bde304a40502d1202f02015c32be5ddb4ad926e5
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b8f175860085f24009e93000fa4fc8b6f568fcd
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e8d0543d2599ce972fe0f827d3a3fd36f8d6a4c
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1dab442f74c787a74fed216cee6a81aed3719ca0
Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc differ
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py
new file mode 100644
index 0000000000000000000000000000000000000000..c748d6ef62b706be01c704f36786428b4f675bfe
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py
@@ -0,0 +1,549 @@
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Python TF-Lite QuantizationDebugger."""
+import collections
+import csv
+import re
+from typing import (Any, Callable, Dict, IO, Iterable, List, Mapping, Optional,
+ Sequence, Tuple)
+
+import numpy as np
+
+from tensorflow.lite.python import convert
+from tensorflow.lite.python import interpreter as _interpreter
+from tensorflow.lite.python.metrics import metrics as metrics_stub # type: ignore
+from tensorflow.python.util import tf_export
+
+
+# TODO(b/198099651): move converter implementation out of lite.py
+TFLiteConverter = Any # importing tf.lite creates circular dependency
+
+# Returns metrics based on difference of values for quantized/float ops.
+_DEFAULT_LAYER_DEBUG_METRICS = {
+ 'num_elements': lambda diffs: diffs.size,
+ 'stddev': np.std,
+ 'mean_error': np.average,
+ 'max_abs_error': lambda diffs: np.max(np.abs(diffs)),
+ 'mean_squared_error': lambda diffs: np.average(diffs**2),
+}
+
+_NUMERIC_VERIFY_OP_NAME = 'NumericVerify'
+
+
+def _get_quant_params(
+ tensor_detail: Mapping[str, Any]) -> Optional[Tuple[float, int]]:
+ """Returns first scale and zero point from tensor detail, if present."""
+ quant_params = tensor_detail['quantization_parameters']
+ if not quant_params:
+ return None
+ if quant_params['scales'] and quant_params['zero_points']:
+ return (quant_params['scales'][0], quant_params['zero_points'][0])
+ return None
+
+
+@tf_export.tf_export('lite.experimental.QuantizationDebugOptions')
+class QuantizationDebugOptions:
+ """Debug options to set up a given QuantizationDebugger."""
+
+ def __init__(self,
+ layer_debug_metrics: Optional[Mapping[str,
+ Callable[[np.ndarray],
+ float]]] = None,
+ model_debug_metrics: Optional[Mapping[
+ str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray]],
+ float]]] = None,
+ layer_direct_compare_metrics: Optional[Mapping[str, Callable[
+ [Sequence[np.ndarray], Sequence[np.ndarray], float, int],
+ float]]] = None,
+ denylisted_ops: Optional[List[str]] = None,
+ denylisted_nodes: Optional[List[str]] = None,
+ fully_quantize: bool = False) -> None:
+ """Initializes debugger options.
+
+ Args:
+ layer_debug_metrics: a dict to specify layer debug functions
+ {function_name_str: function} where the function accepts result of
+ NumericVerify Op, which is value difference between float and
+ dequantized op results. The function returns single scalar value.
+ model_debug_metrics: a dict to specify model debug functions
+ {function_name_str: function} where the function accepts outputs from
+ two models, and returns single scalar value for a metric. (e.g.
+ accuracy, IoU)
+ layer_direct_compare_metrics: a dict to specify layer debug functions
+ {function_name_str: function}. The signature is different from that of
+ `layer_debug_metrics`, and this one gets passed (original float value,
+ original quantized value, scale, zero point). The function's
+ implementation is responsible for correctly dequantizing the quantized
+ value before comparing. Use this one when comparing the diff alone is not
+ enough. (Note: the quantized value is passed as int8, so a cast to int32
+ is needed.)
+ denylisted_ops: a list of op names which are expected to be removed from
+ quantization.
+ denylisted_nodes: a list of op's output tensor names to be removed from
+ quantization.
+ fully_quantize: Bool indicating whether to fully quantize the model.
+ Besides model body, the input/output will be quantized as well.
+ Corresponding to mlir_quantize's fully_quantize parameter.
+
+ Raises:
+ ValueError: when there are duplicate keys
+ """
+ self.layer_debug_metrics = layer_debug_metrics
+ self.model_debug_metrics = model_debug_metrics
+ self.layer_direct_compare_metrics = layer_direct_compare_metrics
+
+ keys = []
+ for metrics in [
+ layer_debug_metrics, model_debug_metrics, layer_direct_compare_metrics
+ ]:
+ if metrics is not None:
+ keys.extend(metrics.keys())
+ if len(keys) != len(set(keys)):
+ raise ValueError('Provided metrics have duplicate keys.')
+
+ self.denylisted_ops = denylisted_ops
+ self.denylisted_nodes = denylisted_nodes
+ self.fully_quantize = fully_quantize
+
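+
+# Example (illustrative): options that add one custom per-layer metric. The
+# metric name is arbitrary; the callable receives the array of differences
+# between float and dequantized results produced by NumericVerify ops.
+#
+#   debug_options = QuantizationDebugOptions(
+#       layer_debug_metrics={
+#           'mean_abs_error': lambda diffs: np.mean(np.abs(diffs))})
+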
+
+@tf_export.tf_export('lite.experimental.QuantizationDebugger')
+class QuantizationDebugger:
+ """Debugger for Quantized TensorFlow Lite debug mode models.
+
+ This can run the TensorFlow Lite converted models equipped with debug ops and
+ collect debug information. This debugger calculates statistics from
+ user-defined post-processing functions as well as default ones.
+ """
+
+ def __init__(self,
+ quant_debug_model_path: Optional[str] = None,
+ quant_debug_model_content: Optional[bytes] = None,
+ float_model_path: Optional[str] = None,
+ float_model_content: Optional[bytes] = None,
+ debug_dataset: Optional[Callable[
+ [], Iterable[Sequence[np.ndarray]]]] = None,
+ debug_options: Optional[QuantizationDebugOptions] = None,
+ converter: Optional[TFLiteConverter] = None) -> None:
+ """Runs the TFLite debugging model with given debug options.
+
+ Args:
+ quant_debug_model_path: Path to the quantized debug TFLite model file.
+ quant_debug_model_content: Content of the quantized debug TFLite model.
+ float_model_path: Path to float TFLite model file.
+ float_model_content: Content of the float TFLite model.
+ debug_dataset: a factory function that returns a dataset generator which
+ is used to generate input samples (list of np.ndarray) for the model.
+ The generated elements must have the same types and shapes as the
+ model's inputs.
+ debug_options: Debug options to debug the given model.
+ converter: Optional, use converter instead of quantized model.
+
+ Raises:
+ ValueError: If the debugger was unable to be created.
+
+ Attributes:
+ layer_statistics: results of error metrics for each NumericVerify op's
+ output, in {layer_name: {metric_name: metric}} format.
+ model_statistics: results of error metrics for the difference between the
+ float and quantized models, in {metric_name: metric} format.
+ """
+ self._data_gen = debug_dataset
+ self._debug_options = debug_options or QuantizationDebugOptions()
+ self.converter = None
+ self.calibrated_model = None
+ self.float_model = None
+ self._float_interpreter = None
+ if converter is not None:
+ if self._debug_options.model_debug_metrics:
+ old_optimizations = converter.optimizations
+ self.converter = self._set_converter_options_for_float(converter)
+ self.float_model = self.converter.convert()
+ converter.optimizations = old_optimizations
+
+ self.converter = self._set_converter_options_for_calibration(converter)
+ self.calibrated_model = self.converter.convert()
+ # Converter should be already set up with all options
+ self._init_from_converter(
+ self._debug_options,
+ self.converter,
+ self.calibrated_model,
+ float_model=self.float_model)
+ else:
+ self._quant_interpreter = _interpreter.Interpreter(
+ quant_debug_model_path,
+ quant_debug_model_content,
+ experimental_preserve_all_tensors=(
+ self._debug_options.layer_direct_compare_metrics is not None))
+ if self._debug_options.model_debug_metrics:
+ self._float_interpreter = _interpreter.Interpreter(
+ float_model_path, float_model_content)
+ self._initialize_stats()
+
+ @property
+ def options(self) -> QuantizationDebugOptions:
+ return self._debug_options
+
+ @options.setter
+ def options(self, options: QuantizationDebugOptions) -> None:
+ self._debug_options = options
+ if not self.converter or not self.calibrated_model:
+ return
+ self._init_from_converter(
+ self._debug_options,
+ self.converter,
+ self.calibrated_model,
+ float_model=self.float_model)
+ self._initialize_stats()
+
+ def _initialize_stats(self):
+ """Helper function initializes stats."""
+ # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details()
+ # Following code is needed to get op's name from the output tensor index,
+ # since NumericVerify op only provides its quantized input tensor index.
+ self._defining_op = dict()
+ for op_info in self._quant_interpreter._get_ops_details(): # pylint: disable=protected-access
+ self._defining_op.update(
+ {tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']})
+
+ self._numeric_verify_tensor_details = None
+ self._numeric_verify_op_details = None
+ if not self._get_numeric_verify_tensor_details():
+ raise ValueError('Please check if the quantized model is in debug mode')
+
+ self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy()
+ if self._debug_options.layer_debug_metrics:
+ self._layer_debug_metrics.update(self._debug_options.layer_debug_metrics)
+
+ self.layer_statistics = None
+ self.model_statistics = None
+
+ self._metrics = metrics_stub.TFLiteMetrics()
+ self._metrics.increase_counter_debugger_creation()
+
+ def _get_quantized_model(self, is_debug: bool) -> bytes:
+ if not self.converter:
+ raise ValueError('No converter found, use this function with the '
+ 'converter option in the constructor.')
+
+ return convert.mlir_quantize(
+ self.calibrated_model,
+ disable_per_channel=self.converter._experimental_disable_per_channel, # pylint: disable=protected-access
+ fully_quantize=self._debug_options.fully_quantize,
+ enable_numeric_verify=is_debug,
+ denylisted_ops=self._debug_options.denylisted_ops,
+ denylisted_nodes=self._debug_options.denylisted_nodes)
+
+ def get_nondebug_quantized_model(self) -> bytes:
+ """Returns a non-instrumented quantized model.
+
+ Convert the quantized model with the initialized converter and
+ return bytes for nondebug model. The model will not be instrumented with
+ numeric verification operations.
+
+ Returns:
+ Model bytes corresponding to the model.
+ Raises:
+ ValueError: if converter is not passed to the debugger.
+ """
+ return self._get_quantized_model(is_debug=False)
+
+ def get_debug_quantized_model(self) -> bytes:
+ """Returns an instrumented quantized model.
+
+ Convert the quantized model with the initialized converter and
+ return bytes for model. The model will be instrumented with numeric
+ verification operations and should only be used for debugging.
+
+ Returns:
+ Model bytes corresponding to the model.
+ Raises:
+ ValueError: if converter is not passed to the debugger.
+ """
+ return self._get_quantized_model(is_debug=True)
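+
+  # Typical flow (sketch): inspect with the instrumented model, then export
+  # the clean one once the metrics look acceptable.
+  #
+  #   debug_model_bytes = debugger.get_debug_quantized_model()
+  #   deploy_model_bytes = debugger.get_nondebug_quantized_model()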
+
+ def _init_from_converter(self,
+ options: QuantizationDebugOptions,
+ converter: TFLiteConverter,
+ calibrated_model: Optional[bytes] = None,
+ float_model: Optional[bytes] = None) -> None:
+ """Convert the model and apply options.
+
+ Converts the quantized model and initializes a quantized model interpreter
+ with the quantized model. Returns a float model interpreter if float model
+ is provided.
+
+ Args:
+ options: a QuantizationDebugOptions object.
+ converter: an initialized tf.lite.TFLiteConverter.
+ calibrated_model: Calibrated model bytes.
+ float_model: Float model bytes.
+ """
+ self.quant_model = convert.mlir_quantize(
+ calibrated_model,
+ disable_per_channel=converter._experimental_disable_per_channel, # pylint: disable=protected-access
+ fully_quantize=options.fully_quantize,
+ enable_numeric_verify=True,
+ denylisted_ops=options.denylisted_ops,
+ denylisted_nodes=options.denylisted_nodes)
+ self._quant_interpreter = _interpreter.Interpreter(
+ model_content=self.quant_model)
+ self._float_interpreter = None
+ if float_model is not None:
+ self._float_interpreter = _interpreter.Interpreter(
+ model_content=float_model)
+
+ def _set_converter_options_for_float(
+ self, converter: TFLiteConverter) -> TFLiteConverter:
+ """Verify converter options and set required experimental options."""
+ if converter.optimizations:
+ converter.optimizations = []
+ return converter
+
+ def _set_converter_options_for_calibration(
+ self, converter: TFLiteConverter) -> TFLiteConverter:
+ """Verify converter options and set required experimental options."""
+ if not converter.optimizations:
+ raise ValueError(
+ 'converter object must set optimizations to lite.Optimize.DEFAULT')
+ if not converter.representative_dataset:
+ raise ValueError('converter object must set representative_dataset')
+
+ converter.experimental_mlir_quantizer = True
+ converter._experimental_calibrate_only = True # pylint: disable=protected-access
+ return converter
+
+ def run(self) -> None:
+ """Runs models and gets metrics."""
+ self.layer_statistics = self._collect_layer_statistics()
+ if self._debug_options.model_debug_metrics:
+ self.model_statistics = self._collect_model_statistics()
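+
+  # After run(), the statistics are plain dicts (sketch; layer names and the
+  # available metric names depend on the model and options):
+  #
+  #   debugger.run()
+  #   for layer_name, metrics in debugger.layer_statistics.items():
+  #     print(layer_name, metrics)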
+
+ def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:
+ """Collects layer statistics by applying layer debug metrics.
+
+ For all data from the given RepresentativeDataset, collect statistics per
+ example by getting the NumericVerify op results in _quant_interpreter
+ and calculating layer debug metrics on the results.
+
+ Returns:
+ aggregated per-layer statistics of NumericVerify results.
+ {layer_name: {metric_name: metric}}
+ """
+ layer_statistics = collections.defaultdict(
+ lambda: collections.defaultdict(list))
+
+ initialize = True
+ for tensor_data in self._data_gen():
+ self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)
+ initialize = False
+
+ # Run the model.
+ self._quant_interpreter.invoke()
+
+ # Collect the statistics of this invoke result.
+ for tensor_detail in self._get_numeric_verify_tensor_details():
+ tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup
+ diffs = self._quant_interpreter.get_tensor(tensor_detail['index']) # pytype: disable=unsupported-operands # dynamic-method-lookup
+ for metric_name, metric_fn in self._layer_debug_metrics.items():
+ layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))
+
+ if self._debug_options.layer_direct_compare_metrics is not None:
+ for tensor_detail in self._get_numeric_verify_tensor_details():
+ tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup
+ op_idx = self._defining_op[tensor_detail['index']] # pytype: disable=unsupported-operands # dynamic-method-lookup
+ op_detail = self._quant_interpreter._get_op_details(op_idx) # pylint: disable=protected-access
+ q_idx, f_idx = op_detail['inputs']
+ quant_input_detail = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access
+ q_idx, subgraph_index=0)
+ for (metric_name, metric_fn
+ ) in self._debug_options.layer_direct_compare_metrics.items():
+ layer_statistics[tensor_name][metric_name].append(
+ metric_fn(
+ self._quant_interpreter.get_tensor(f_idx),
+ self._quant_interpreter.get_tensor(q_idx),
+ quant_input_detail['quantization_parameters']['scales'][0],
+ quant_input_detail['quantization_parameters']['zero_points']
+ [0]))
+
+ # Calculate final aggregated metrics for each layer.
+ for metrics in layer_statistics.values():
+ for metric_name in metrics:
+ metrics[metric_name] = np.nanmean(metrics[metric_name])
+
+ return layer_statistics
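+
+  # Custom per-layer metrics can be supplied via the options (sketch). Each
+  # function receives the NumericVerify diff tensor for one example and
+  # returns a scalar, which is then averaged over the dataset with np.nanmean:
+  #
+  #   opts = QuantizationDebugOptions(
+  #       layer_debug_metrics={'mean_abs_diff':
+  #                            lambda diffs: np.mean(np.abs(diffs))})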
+
+ def _collect_model_statistics(self) -> Dict[str, float]:
+ """Collects model output metrics.
+
+    For all data from the given RepresentativeDataset, collect all model
+    output results from the float model and the quantized debug model, and
+    calculate metrics with the model output functions. Each metric value is
+    the metric function's result aggregated over the whole dataset into a
+    scalar.
+
+ Returns:
+ aggregated per-model output discrepancy metrics.
+ {metric_name: aggregated_metric}
+ """
+
+ model_statistics = collections.defaultdict(list)
+
+ initialize = True
+ for tensor_data in self._data_gen():
+ # Run quantized debug model and collect output results.
+ self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)
+ self._quant_interpreter.invoke()
+ quant_tensor_data = self._get_output_tensors(self._quant_interpreter)
+
+ # Run float model if it's initialized.
+ float_tensor_data = []
+ if self._float_interpreter:
+ self._set_input_tensors(
+ self._float_interpreter, tensor_data, initialize)
+ self._float_interpreter.invoke()
+ float_tensor_data = self._get_output_tensors(self._float_interpreter)
+
+ initialize = False
+
+ # Calculate the metrics.
+ for (metric_name,
+ metric_fn) in self._debug_options.model_debug_metrics.items():
+ model_statistics[metric_name].append(
+ metric_fn(float_tensor_data, quant_tensor_data))
+
+    # Calculate the final aggregated metric for each output.
+ return {
+ metric_name: np.mean(metric)
+ for metric_name, metric in model_statistics.items()
+ }
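+
+  # A model debug metric compares the final outputs of the two models
+  # (sketch). Each function receives (float_outputs, quant_outputs), both
+  # lists of np.ndarray, and returns a scalar:
+  #
+  #   opts = QuantizationDebugOptions(
+  #       model_debug_metrics={'argmax_match':
+  #           lambda f, q: np.mean(np.argmax(f[0], -1) == np.argmax(q[0], -1))})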
+
+ def _set_input_tensors(self, interpreter: _interpreter.Interpreter,
+ tensor_data: Sequence[np.ndarray],
+ initialize: bool) -> None:
+ """Sets input tensors into TFLite model Interpreter.
+
+ Args:
+ interpreter: a tf.lite.Interpreter object with allocated tensors.
+ tensor_data: a list of Numpy array data.
+ initialize: set to true when input is first set for the interpreter, to
+ set input shapes and allocate tensors.
+
+ Raises:
+ ValueError: when inputs can't be set, or size of provided inputs does not
+ match size of model inputs.
+ """
+ input_details = interpreter.get_input_details()
+ if len(input_details) != len(tensor_data):
+ raise ValueError(
+ 'Number of inputs provided ({}) does not match number of inputs to '
+ 'the model ({})'.format(len(tensor_data), len(input_details)))
+
+ if initialize:
+ for input_detail, tensor in zip(input_details, tensor_data):
+ interpreter.resize_tensor_input(input_detail['index'], tensor.shape)
+ interpreter.allocate_tensors()
+
+ for input_detail, tensor in zip(input_details, tensor_data):
+ if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:
+ quant_params = _get_quant_params(input_detail)
+ if quant_params:
+ scale, zero_point = quant_params
+ tensor = np.round((tensor / scale) + zero_point).astype(np.int8)
+ interpreter.set_tensor(input_detail['index'], tensor)
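+
+  # The quantization above follows q = round(x / scale) + zero_point. For
+  # example, with scale=0.5 and zero_point=-3, x=2.0 maps to
+  # round(2.0 / 0.5) + (-3) = 1, stored as int8.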
+
+ def _get_output_tensors(
+ self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]:
+ """Returns output tensors of given TFLite model Interpreter.
+
+ Args:
+ interpreter: a tf.lite.Interpreter object with allocated tensors.
+
+ Returns:
+ a list of numpy arrays representing output tensor results.
+ """
+
+ outputs = []
+ for output_detail in interpreter.get_output_details():
+ tensor = interpreter.get_tensor(output_detail['index'])
+ if output_detail['dtype'] == np.int8:
+ quant_params = _get_quant_params(output_detail)
+ if quant_params:
+ scale, zero_point = quant_params
+ tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype(
+ np.float32)
+ outputs.append(tensor)
+
+ return outputs
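+
+  # Dequantization inverts that mapping: x = (q - zero_point) * scale.
+  # Continuing the example above, q=1 with scale=0.5 and zero_point=-3
+  # recovers (1 - (-3)) * 0.5 = 2.0.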
+
+ def _get_numeric_verify_tensor_details(self) -> List[str]:
+ """Returns all names of all tensors from NumericVerify op."""
+ # pylint: disable=protected-access
+ if not self._numeric_verify_tensor_details:
+ self._numeric_verify_tensor_details = []
+ self._numeric_verify_op_details = {}
+ for op_info in self._quant_interpreter._get_ops_details():
+ if op_info['op_name'] == _NUMERIC_VERIFY_OP_NAME:
+ self._numeric_verify_tensor_details.append(
+ self._quant_interpreter._get_tensor_details(
+ op_info['outputs'][0], subgraph_index=0))
+ tensor_name = self._numeric_verify_tensor_details[-1]['name']
+ self._numeric_verify_op_details[tensor_name] = op_info
+ # pylint: enable=protected-access
+ return self._numeric_verify_tensor_details
+
+ def _get_operand_name_and_index(self,
+ numeric_verify_name: str) -> Tuple[str, int]:
+ """Gets the index and name of NumericVerify Op's quantized input tensor.
+
+ Args:
+      numeric_verify_name: name of the NumericVerify op's output tensor. It
+        has the format `NumericVerify/{quantized_tensor_name}:{quantized_tensor_idx}`.
+
+ Returns:
+ Tuple of (tensor_name, tensor_idx) for quantized op's output tensor.
+ """
+ tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1)
+ float_tensor_name = tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:]
+ if re.match(r'\d', float_tensor_name[-1]):
+ float_tensor_name = float_tensor_name[:-1]
+
+ return (float_tensor_name, int(tensor_idx))
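+
+  # For example (hypothetical tensor name):
+  #   _get_operand_name_and_index('NumericVerify/conv2d/BiasAdd:7')
+  # returns ('conv2d/BiasAdd', 7); a single trailing digit left over from
+  # duplicate tensor naming is stripped from the name.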
+
+ def layer_statistics_dump(self, file: IO[str]) -> None:
+ """Dumps layer statistics into file, in csv format.
+
+ Args:
+ file: file, or file-like object to write.
+ """
+ # order of `fields` is the order of fields in csv.
+ fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())
+ if self._debug_options.layer_direct_compare_metrics is not None:
+ fields += list(self._debug_options.layer_direct_compare_metrics.keys())
+ fields += ['scale', 'zero_point', 'tensor_name']
+ writer = csv.DictWriter(file, fields)
+ writer.writeheader()
+ if self.layer_statistics:
+ for name, metrics in self.layer_statistics.items():
+ data = metrics.copy()
+ (data['tensor_name'], _) = self._get_operand_name_and_index(name)
+ data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]
+ data['op_name'] = self._quant_interpreter._get_op_details( # pylint: disable=protected-access
+ self._defining_op[data['tensor_idx']])['op_name']
+ details = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access
+ data['tensor_idx'], subgraph_index=0)
+ data['scale'], data['zero_point'] = (
+ details['quantization_parameters']['scales'][0],
+ details['quantization_parameters']['zero_points'][0])
+ writer.writerow(data)
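+
+# Example usage (sketch): write the per-layer statistics to a CSV file after
+# debugger.run() has populated layer_statistics.
+#
+#   with open('layer_stats.csv', 'w') as f:
+#     debugger.layer_statistics_dump(f)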
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py
new file mode 100644
index 0000000000000000000000000000000000000000..3dca3f62b798212ab5d589ed5c4901e072a308ef
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py
@@ -0,0 +1,549 @@
+#!/usr/bin/env python
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""This tool creates an html visualization of a TensorFlow Lite graph.
+
+Example usage:
+
+python visualize.py foo.tflite foo.html
+"""
+
+import json
+import os
+import re
+import sys
+import numpy as np
+
+# pylint: disable=g-import-not-at-top
+if not os.path.splitext(__file__)[0].endswith(
+ os.path.join("tflite_runtime", "visualize")):
+ # This file is part of tensorflow package.
+ from tensorflow.lite.python import schema_py_generated as schema_fb
+else:
+ # This file is part of tflite_runtime package.
+ from tflite_runtime import schema_py_generated as schema_fb
+
+# CSS/HTML header for the visualizer page. A minimal sketch of the original
+# stylesheet: it styles the summary tables and the tensor tooltips used by
+# TensorMapper, and loads d3.js for the graph rendering below.
+_CSS = """
+<html>
+<head>
+<style>
+body {font-family: sans-serif; background-color: #fa0;}
+table {background-color: #eca;}
+th {background-color: black; color: white;}
+.tooltip {color: blue;}
+.tooltip .tooltipcontent {visibility: hidden; color: black; background-color: yellow; padding: 5px; border-radius: 4px; position: absolute; z-index: 1;}
+.tooltip:hover .tooltipcontent {visibility: visible;}
+.edges line {stroke: #333;}
+.nodes text {color: black; pointer-events: none; font-family: monospace; font-size: 11px;}
+</style>
+<script src="https://d3js.org/d3.v4.min.js"></script>
+</head>
+<body>
+"""
+
+# D3 rendering script template; GenerateGraph fills the first slot with the
+# graph JSON and the second with the subgraph index. A minimal sketch stands
+# in for the original force-layout renderer.
+_D3_HTML_TEMPLATE = """
+<script>
+  var graph = %s; var subgraphIndex = %d;
+</script>
+"""
+
+
+def TensorTypeToName(tensor_type):
+ """Converts a numerical enum to a readable tensor type."""
+ for name, value in schema_fb.TensorType.__dict__.items():
+ if value == tensor_type:
+ return name
+ return None
+
+
+def BuiltinCodeToName(code):
+ """Converts a builtin op code enum to a readable name."""
+ for name, value in schema_fb.BuiltinOperator.__dict__.items():
+ if value == code:
+ return name
+ return None
+
+
+def NameListToString(name_list):
+ """Converts a list of integers to the equivalent ASCII string."""
+ if isinstance(name_list, str):
+ return name_list
+ else:
+ result = ""
+ if name_list is not None:
+ for val in name_list:
+ result = result + chr(int(val))
+ return result
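+
+# For example, NameListToString([72, 105]) returns "Hi"; str inputs are
+# passed through unchanged.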
+
+
+class OpCodeMapper:
+ """Maps an opcode index to an op name."""
+
+ def __init__(self, data):
+ self.code_to_name = {}
+ for idx, d in enumerate(data["operator_codes"]):
+ self.code_to_name[idx] = BuiltinCodeToName(d["builtin_code"])
+ if self.code_to_name[idx] == "CUSTOM":
+ self.code_to_name[idx] = NameListToString(d["custom_code"])
+
+ def __call__(self, x):
+ if x not in self.code_to_name:
+ s = ""
+ else:
+ s = self.code_to_name[x]
+ return "%s (%d)" % (s, x)
+
+
+class DataSizeMapper:
+ """For buffers, report the number of bytes."""
+
+ def __call__(self, x):
+ if x is not None:
+ return "%d bytes" % len(x)
+ else:
+ return "--"
+
+
+class TensorMapper:
+ """Maps a list of tensor indices to a tooltip hoverable indicator of more."""
+
+ def __init__(self, subgraph_data):
+ self.data = subgraph_data
+
+  def __call__(self, x):
+    html = ""
+    if x is None:
+      return html
+
+    html += "<span class='tooltip'><span class='tooltipcontent'>"
+    for i in x:
+      tensor = self.data["tensors"][i]
+      html += str(i) + " "
+      html += NameListToString(tensor["name"]) + " "
+      html += TensorTypeToName(tensor["type"]) + " "
+      html += (repr(tensor["shape"]) if "shape" in tensor else "[]")
+      html += (repr(tensor["shape_signature"])
+               if "shape_signature" in tensor else "[]") + "<br>"
+    html += "</span>"
+    html += repr(x)
+    html += "</span>"
+    return html
+
+
+def GenerateGraph(subgraph_idx, g, opcode_mapper):
+ """Produces the HTML required to have a d3 visualization of the dag."""
+
+ def TensorName(idx):
+ return "t%d" % idx
+
+ def OpName(idx):
+ return "o%d" % idx
+
+ edges = []
+ nodes = []
+ first = {}
+ second = {}
+ pixel_mult = 200 # TODO(aselle): multiplier for initial placement
+ width_mult = 170 # TODO(aselle): multiplier for initial placement
+ for op_index, op in enumerate(g["operators"] or []):
+ if op["inputs"] is not None:
+ for tensor_input_position, tensor_index in enumerate(op["inputs"]):
+ if tensor_index not in first:
+ first[tensor_index] = ((op_index - 0.5 + 1) * pixel_mult,
+ (tensor_input_position + 1) * width_mult)
+ edges.append({
+ "source": TensorName(tensor_index),
+ "target": OpName(op_index)
+ })
+ if op["outputs"] is not None:
+ for tensor_output_position, tensor_index in enumerate(op["outputs"]):
+ if tensor_index not in second:
+ second[tensor_index] = ((op_index + 0.5 + 1) * pixel_mult,
+ (tensor_output_position + 1) * width_mult)
+ edges.append({
+ "target": TensorName(tensor_index),
+ "source": OpName(op_index)
+ })
+
+ nodes.append({
+ "id": OpName(op_index),
+ "name": opcode_mapper(op["opcode_index"]),
+ "group": 2,
+ "x": pixel_mult,
+ "y": (op_index + 1) * pixel_mult
+ })
+ for tensor_index, tensor in enumerate(g["tensors"]):
+ initial_y = (
+ first[tensor_index] if tensor_index in first else
+ second[tensor_index] if tensor_index in second else (0, 0))
+
+ nodes.append({
+ "id": TensorName(tensor_index),
+ "name": "%r (%d)" % (getattr(tensor, "shape", []), tensor_index),
+ "group": 1,
+ "x": initial_y[1],
+ "y": initial_y[0]
+ })
+ graph_str = json.dumps({"nodes": nodes, "edges": edges})
+
+ html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx)
+ return html
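+
+# Each emitted node is either a tensor ("t<idx>", group 1) or an op
+# ("o<idx>", group 2); edges run tensor->op for inputs and op->tensor for
+# outputs, giving a bipartite dataflow graph for d3 to lay out.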
+
+
+def GenerateTableHtml(items, keys_to_print, display_index=True):
+ """Given a list of object values and keys to print, make an HTML table.
+
+  Args:
+    items: Items to print; an array of dicts.
+    keys_to_print: A list of (key, display_fn) pairs. `key` is a key in the
+      object, i.e. items[0][key] should exist. `display_fn` maps the value
+      for display, i.e. the html cell will contain the string returned by
+      `display_fn(items[0][key])`.
+    display_index: add a column which is the index of each row in `items`.
+
+ Returns:
+ An html table.
+ """
+ html = ""
+ # Print the list of items
+ html += "
\n"
+ html += "
\n"
+ if display_index:
+ html += "
index
"
+ for h, mapper in keys_to_print:
+ html += "
%s
" % h
+ html += "
\n"
+ for idx, tensor in enumerate(items):
+ html += "
\n"
+ if display_index:
+ html += "
%d
" % idx
+ # print tensor.keys()
+ for h, mapper in keys_to_print:
+ val = tensor[h] if h in tensor else None
+ val = val if mapper is None else mapper(val)
+ html += "
%s
\n" % val
+
+ html += "
\n"
+ html += "
\n"
+ return html
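+
+# Example (sketch, hypothetical data):
+#   GenerateTableHtml([{'name': 'x', 'size': 4}],
+#                     [('name', None), ('size', lambda v: '%d bytes' % v)])
+# renders one row with columns index, name, and size.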
+
+
+def CamelCaseToSnakeCase(camel_case_input):
+ """Converts an identifier in CamelCase to snake_case."""
+ s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_case_input)
+ return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
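+
+# For example, CamelCaseToSnakeCase("BuiltinCode") returns "builtin_code".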
+
+
+def FlatbufferToDict(fb, preserve_as_numpy):
+ """Converts a hierarchy of FB objects into a nested dict.
+
+ We avoid transforming big parts of the flat buffer into python arrays. This
+ speeds conversion from ten minutes to a few seconds on big graphs.
+
+ Args:
+ fb: a flat buffer structure. (i.e. ModelT)
+    preserve_as_numpy: true if all downstream np.arrays should be preserved;
+      false if they should become python lists.
+
+  Returns:
+ A dictionary representing the flatbuffer rather than a flatbuffer object.
+ """
+ if isinstance(fb, int) or isinstance(fb, float) or isinstance(fb, str):
+ return fb
+ elif hasattr(fb, "__dict__"):
+ result = {}
+ for attribute_name in dir(fb):
+ attribute = fb.__getattribute__(attribute_name)
+ if not callable(attribute) and attribute_name[0] != "_":
+ snake_name = CamelCaseToSnakeCase(attribute_name)
+ preserve = True if attribute_name == "buffers" else preserve_as_numpy
+ result[snake_name] = FlatbufferToDict(attribute, preserve)
+ return result
+ elif isinstance(fb, np.ndarray):
+ return fb if preserve_as_numpy else fb.tolist()
+ elif hasattr(fb, "__len__"):
+ return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb]
+ else:
+ return fb
+
+
+def CreateDictFromFlatbuffer(buffer_data):
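+  """Parses a TFLite flatbuffer (bytes) into a nested Python dict."""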
+ model_obj = schema_fb.Model.GetRootAsModel(buffer_data, 0)
+ model = schema_fb.ModelT.InitFromObj(model_obj)
+ return FlatbufferToDict(model, preserve_as_numpy=False)
+
+
+def create_html(tflite_input, input_is_filepath=True): # pylint: disable=invalid-name
+ """Returns html description with the given tflite model.
+
+ Args:
+ tflite_input: TFLite flatbuffer model path or model object.
+ input_is_filepath: Tells if tflite_input is a model path or a model object.
+
+ Returns:
+ Dump of the given tflite model in HTML format.
+
+ Raises:
+ RuntimeError: If the input is not valid.
+ """
+
+  # Convert the model into a nested dict (JSON-like) using the generated
+  # schema classes; no external flatc invocation is needed.
+ if input_is_filepath:
+ if not os.path.exists(tflite_input):
+ raise RuntimeError("Invalid filename %r" % tflite_input)
+ if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"):
+ with open(tflite_input, "rb") as file_handle:
+ file_data = bytearray(file_handle.read())
+ data = CreateDictFromFlatbuffer(file_data)
+ elif tflite_input.endswith(".json"):
+      with open(tflite_input) as json_file:
+        data = json.load(json_file)
+ else:
+ raise RuntimeError("Input file was not .tflite or .json")
+ else:
+ data = CreateDictFromFlatbuffer(tflite_input)
+ html = ""
+ html += _CSS
+ html += "
TensorFlow Lite Model"
+
+ data["filename"] = tflite_input if input_is_filepath else (
+ "Null (used model object)") # Avoid special case
+
+ toplevel_stuff = [("filename", None), ("version", None),
+ ("description", None)]
+
+ html += "
\n"
+ for key, mapping in toplevel_stuff:
+ if not mapping:
+ mapping = lambda x: x
+ html += "
%s
%s
\n" % (key, mapping(data.get(key)))
+ html += "
\n"
+
+ # Spec on what keys to display
+ buffer_keys_to_display = [("data", DataSizeMapper())]
+ operator_keys_to_display = [("builtin_code", BuiltinCodeToName),
+ ("custom_code", NameListToString),
+ ("version", None)]
+
+ # Update builtin code fields.
+ for d in data["operator_codes"]:
+ d["builtin_code"] = max(d["builtin_code"], d["deprecated_builtin_code"])
+
+ for subgraph_idx, g in enumerate(data["subgraphs"]):
+ # Subgraph local specs on what to display
+ html += "