diff --git a/.gitattributes b/.gitattributes index 0c7cf931b55de1929b0aaeb8ef192799a588f71a..0f3eb782591b5e94b7ea3c15b3425513877e36cb 100644 --- a/.gitattributes +++ b/.gitattributes @@ -197,3 +197,4 @@ SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/gr SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/profiler/internal/_pywrap_profiler.so filter=lfs diff=lfs merge=lfs -text SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/client/_pywrap_tf_session.so filter=lfs diff=lfs merge=lfs -text +SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so filter=lfs diff=lfs merge=lfs -text diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa9c24592d6e6f5f7d1845d3dc55bfbbd13d0116 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67cd0726ffc7892f2851b8dcd31e3388022a280d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e04a9009cebf0eab8c8b33da02a1b5d75878676a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25b8f4aaeac0114e1bf2b2c40c4145db0d7a5994 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bacbfaa95c07566f16f391ffd0bfdb75c3ea697 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..beb3f4ce5316f83453d1276033c621931c98b4bd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py @@ -0,0 +1,763 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('delete_rpc_future_resource') +def delete_rpc_future_resource(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteRpcFutureResource", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_delete_rpc_future_resource( + (handle, deleter, name,), None) + if _result is not NotImplemented: + return _result + return delete_rpc_future_resource_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + delete_rpc_future_resource, (), dict(handle=handle, + deleter=deleter, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_delete_rpc_future_resource( + (handle, deleter, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteRpcFutureResource", handle=handle, deleter=deleter, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + delete_rpc_future_resource, (), dict(handle=handle, deleter=deleter, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +DeleteRpcFutureResource = tf_export("raw_ops.DeleteRpcFutureResource")(_ops.to_raw_op(delete_rpc_future_resource)) +_dispatcher_for_delete_rpc_future_resource = delete_rpc_future_resource._tf_type_based_dispatcher.Dispatch + + +def delete_rpc_future_resource_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteRpcFutureResource", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + +_RpcCallOutput = collections.namedtuple( + "RpcCall", + ["future", "deleter"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_call') +def rpc_call(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name=None): + r"""TODO: add doc. + + Args: + client: A `Tensor` of type `resource`. + method_name: A `Tensor` of type `string`. 
+ args: A list of `Tensor` objects. + timeout_in_ms: A `Tensor` of type `int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (future, deleter). + + future: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcCall", name, client, method_name, args, timeout_in_ms) + _result = _RpcCallOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_call( + (client, method_name, args, timeout_in_ms, name,), None) + if _result is not NotImplemented: + return _result + return rpc_call_eager_fallback( + client, method_name, args, timeout_in_ms, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_call, (), dict(client=client, method_name=method_name, + args=args, timeout_in_ms=timeout_in_ms, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_call( + (client, method_name, args, timeout_in_ms, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcCall", client=client, method_name=method_name, args=args, + timeout_in_ms=timeout_in_ms, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_call, (), dict(client=client, method_name=method_name, + args=args, timeout_in_ms=timeout_in_ms, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op.get_attr("Tin")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcCall", _inputs_flat, _attrs, _result) + _result = _RpcCallOutput._make(_result) + return _result + +RpcCall = tf_export("raw_ops.RpcCall")(_ops.to_raw_op(rpc_call)) +_dispatcher_for_rpc_call = rpc_call._tf_type_based_dispatcher.Dispatch + + +def rpc_call_eager_fallback(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name, ctx): + _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx) + client = _ops.convert_to_tensor(client, _dtypes.resource) + method_name = _ops.convert_to_tensor(method_name, _dtypes.string) + timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64) + _inputs_flat = [client, method_name] + list(args) + [timeout_in_ms] + _attrs = ("Tin", _attr_Tin) + _result = _execute.execute(b"RpcCall", 2, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcCall", _inputs_flat, _attrs, _result) + _result = _RpcCallOutput._make(_result) + return _result + +_RpcCheckStatusOutput = collections.namedtuple( + "RpcCheckStatus", + ["error_code", "error"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_check_status') +def rpc_check_status(status_or: Annotated[Any, _atypes.Resource], name=None): + r"""TODO: add doc. 
+ + Args: + status_or: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (error_code, error). + + error_code: A `Tensor` of type `int64`. + error: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcCheckStatus", name, status_or) + _result = _RpcCheckStatusOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_check_status( + (status_or, name,), None) + if _result is not NotImplemented: + return _result + return rpc_check_status_eager_fallback( + status_or, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_check_status, (), dict(status_or=status_or, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_check_status( + (status_or, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcCheckStatus", status_or=status_or, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_check_status, (), dict(status_or=status_or, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcCheckStatus", _inputs_flat, _attrs, _result) + _result = _RpcCheckStatusOutput._make(_result) + return _result + +RpcCheckStatus = tf_export("raw_ops.RpcCheckStatus")(_ops.to_raw_op(rpc_check_status)) +_dispatcher_for_rpc_check_status = rpc_check_status._tf_type_based_dispatcher.Dispatch + + +def rpc_check_status_eager_fallback(status_or: Annotated[Any, _atypes.Resource], name, ctx): + status_or = _ops.convert_to_tensor(status_or, _dtypes.resource) + _inputs_flat = [status_or] + _attrs = None + _result = _execute.execute(b"RpcCheckStatus", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcCheckStatus", _inputs_flat, _attrs, _result) + _result = _RpcCheckStatusOutput._make(_result) + return _result + +_RpcClientOutput = collections.namedtuple( + "RpcClient", + ["client", "method_specs"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_client') +def rpc_client(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name:str="", list_registered_methods:bool=False, name=None): + r"""TODO: add doc. + + Args: + server_address: A `Tensor` of type `string`. + timeout_in_ms: A `Tensor` of type `int64`. + shared_name: An optional `string`. Defaults to `""`. + list_registered_methods: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (client, method_specs). 
+ + client: A `Tensor` of type `resource`. + method_specs: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcClient", name, server_address, timeout_in_ms, "shared_name", + shared_name, "list_registered_methods", list_registered_methods) + _result = _RpcClientOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_client( + (server_address, timeout_in_ms, shared_name, + list_registered_methods, name,), None) + if _result is not NotImplemented: + return _result + return rpc_client_eager_fallback( + server_address, timeout_in_ms, shared_name=shared_name, + list_registered_methods=list_registered_methods, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_client, (), dict(server_address=server_address, + timeout_in_ms=timeout_in_ms, + shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_client( + (server_address, timeout_in_ms, shared_name, list_registered_methods, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if list_registered_methods is None: + list_registered_methods = False + list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcClient", server_address=server_address, + timeout_in_ms=timeout_in_ms, shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_client, (), dict(server_address=server_address, + timeout_in_ms=timeout_in_ms, + shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("shared_name", _op.get_attr("shared_name"), + "list_registered_methods", + _op._get_attr_bool("list_registered_methods")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcClient", _inputs_flat, _attrs, _result) + _result = _RpcClientOutput._make(_result) + return _result + +RpcClient = tf_export("raw_ops.RpcClient")(_ops.to_raw_op(rpc_client)) +_dispatcher_for_rpc_client = rpc_client._tf_type_based_dispatcher.Dispatch + + +def rpc_client_eager_fallback(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name: str, list_registered_methods: bool, name, ctx): + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if list_registered_methods is None: + list_registered_methods = False + list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods") + server_address = _ops.convert_to_tensor(server_address, _dtypes.string) + timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64) + _inputs_flat = [server_address, 
timeout_in_ms] + _attrs = ("shared_name", shared_name, "list_registered_methods", + list_registered_methods) + _result = _execute.execute(b"RpcClient", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcClient", _inputs_flat, _attrs, _result) + _result = _RpcClientOutput._make(_result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_get_value') +def rpc_get_value(status_or: Annotated[Any, _atypes.Resource], Tout, name=None): + r"""TODO: add doc. + + Args: + status_or: A `Tensor` of type `resource`. + Tout: A list of `tf.DTypes`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcGetValue", name, status_or, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_get_value( + (status_or, Tout, name,), None) + if _result is not NotImplemented: + return _result + return rpc_get_value_eager_fallback( + status_or, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_get_value( + (status_or, Tout, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'rpc_get_value' Op, not %r." 
% Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcGetValue", status_or=status_or, Tout=Tout, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("Tout", _op.get_attr("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcGetValue", _inputs_flat, _attrs, _result) + return _result + +RpcGetValue = tf_export("raw_ops.RpcGetValue")(_ops.to_raw_op(rpc_get_value)) +_dispatcher_for_rpc_get_value = rpc_get_value._tf_type_based_dispatcher.Dispatch + + +def rpc_get_value_eager_fallback(status_or: Annotated[Any, _atypes.Resource], Tout, name, ctx): + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'rpc_get_value' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + status_or = _ops.convert_to_tensor(status_or, _dtypes.resource) + _inputs_flat = [status_or] + _attrs = ("Tout", Tout) + _result = _execute.execute(b"RpcGetValue", len(Tout), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcGetValue", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server') +def rpc_server(server_address: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + server_address: A `Tensor` of type `string`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServer", name, server_address) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server( + (server_address, name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_eager_fallback( + server_address, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server, (), dict(server_address=server_address, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server( + (server_address, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServer", server_address=server_address, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server, (), dict(server_address=server_address, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcServer", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RpcServer = tf_export("raw_ops.RpcServer")(_ops.to_raw_op(rpc_server)) +_dispatcher_for_rpc_server = rpc_server._tf_type_based_dispatcher.Dispatch + + +def rpc_server_eager_fallback(server_address: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Resource]: + server_address = _ops.convert_to_tensor(server_address, _dtypes.string) + _inputs_flat = [server_address] + _attrs = None + _result = _execute.execute(b"RpcServer", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcServer", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server_register') +def rpc_server_register(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs:str="", name=None): + r"""TODO: add doc. + + Args: + server: A `Tensor` of type `resource`. + method_name: A `Tensor` of type `string`. + captured_inputs: A list of `Tensor` objects. + f: A function decorated with @Defun. + output_specs: A `string`. + input_specs: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServerRegister", name, server, method_name, captured_inputs, + "f", f, "input_specs", input_specs, "output_specs", output_specs) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server_register( + (server, method_name, captured_inputs, f, output_specs, input_specs, + name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_register_eager_fallback( + server, method_name, captured_inputs, f=f, input_specs=input_specs, + output_specs=output_specs, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_register, (), dict(server=server, + method_name=method_name, + captured_inputs=captured_inputs, + f=f, output_specs=output_specs, + input_specs=input_specs, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server_register( + (server, method_name, captured_inputs, f, output_specs, input_specs, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ output_specs = _execute.make_str(output_specs, "output_specs") + if input_specs is None: + input_specs = "" + input_specs = _execute.make_str(input_specs, "input_specs") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServerRegister", server=server, method_name=method_name, + captured_inputs=captured_inputs, f=f, + output_specs=output_specs, + input_specs=input_specs, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_register, (), dict(server=server, + method_name=method_name, + captured_inputs=captured_inputs, f=f, + output_specs=output_specs, + input_specs=input_specs, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +RpcServerRegister = tf_export("raw_ops.RpcServerRegister")(_ops.to_raw_op(rpc_server_register)) +_dispatcher_for_rpc_server_register = rpc_server_register._tf_type_based_dispatcher.Dispatch + + +def rpc_server_register_eager_fallback(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs: str, name, ctx): + output_specs = _execute.make_str(output_specs, "output_specs") + if input_specs is None: + input_specs = "" + input_specs = _execute.make_str(input_specs, "input_specs") + _attr_Tin, captured_inputs = _execute.convert_to_mixed_eager_tensors(captured_inputs, ctx) + server = _ops.convert_to_tensor(server, _dtypes.resource) + method_name = _ops.convert_to_tensor(method_name, _dtypes.string) + _inputs_flat = [server, method_name] + list(captured_inputs) + _attrs = ("Tin", _attr_Tin, "f", f, "input_specs", input_specs, + "output_specs", output_specs) + _result = _execute.execute(b"RpcServerRegister", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server_start') +def rpc_server_start(server: 
Annotated[Any, _atypes.Resource], name=None): + r"""TODO: add doc. + + Args: + server: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServerStart", name, server) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server_start( + (server, name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_start_eager_fallback( + server, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_start, (), dict(server=server, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server_start( + (server, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServerStart", server=server, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_start, (), dict(server=server, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +RpcServerStart = tf_export("raw_ops.RpcServerStart")(_ops.to_raw_op(rpc_server_start)) +_dispatcher_for_rpc_server_start = rpc_server_start._tf_type_based_dispatcher.Dispatch + + +def rpc_server_start_eager_fallback(server: Annotated[Any, _atypes.Resource], name, ctx): + server = _ops.convert_to_tensor(server, _dtypes.resource) + _inputs_flat = [server] + _attrs = None + _result = _execute.execute(b"RpcServerStart", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25ba7eee4766e3bcad03a8f8d47b20e961a8de59 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6ad43b0dfbcf991d2ae995b4056e94d701d6e06 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28ea08746738f2278c077c5d8754a0d1e33b278b Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..39fa8850a62a590e1f1fd5d521d99462ec88a6ce --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2 +from tensorflow.core.protobuf import struct_pb2 as tensorflow_dot_core_dot_protobuf_dot_struct__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nAtensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto\x12\x0etensorflow.rpc\x1a&tensorflow/core/framework/tensor.proto\x1a%tensorflow/core/protobuf/struct.proto\"M\n\x0b\x43\x61llRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12.\n\rinput_tensors\x18\x02 \x03(\x0b\x32\x17.tensorflow.TensorProto\"?\n\x0c\x43\x61llResponse\x12/\n\x0eoutput_tensors\x18\x01 \x03(\x0b\x32\x17.tensorflow.TensorProto\"\r\n\x0bListRequest\"\x87\x01\n\x10RegisteredMethod\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x30\n\x0binput_specs\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\x12\x31\n\x0coutput_specs\x18\x03 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\"L\n\x0cListResponse\x12<\n\x12registered_methods\x18\x01 \x03(\x0b\x32 .tensorflow.rpc.RegisteredMethod2\x96\x01\n\nRpcService\x12\x43\n\x04\x43\x61ll\x12\x1b.tensorflow.rpc.CallRequest\x1a\x1c.tensorflow.rpc.CallResponse\"\x00\x12\x43\n\x04List\x12\x1b.tensorflow.rpc.ListRequest\x1a\x1c.tensorflow.rpc.ListResponse\"\x00\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.distribute.experimental.rpc.proto.tf_rpc_service_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + 
_CALLREQUEST._serialized_start=164 + _CALLREQUEST._serialized_end=241 + _CALLRESPONSE._serialized_start=243 + _CALLRESPONSE._serialized_end=306 + _LISTREQUEST._serialized_start=308 + _LISTREQUEST._serialized_end=321 + _REGISTEREDMETHOD._serialized_start=324 + _REGISTEREDMETHOD._serialized_end=459 + _LISTRESPONSE._serialized_start=461 + _LISTRESPONSE._serialized_end=537 + _RPCSERVICE._serialized_start=540 + _RPCSERVICE._serialized_end=690 +# @@protoc_insertion_point(module_scope) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..fe854a3a079e616b21c1effaedfbfa409857573d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py @@ -0,0 +1,63 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +from tensorflow.distribute.experimental.rpc.proto import tf_rpc_service_pb2 as tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2 + + +class RpcServiceStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Call = channel.unary_unary( + '/tensorflow.rpc.RpcService/Call', + request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.SerializeToString, + response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.FromString, + ) + self.List = channel.unary_unary( + '/tensorflow.rpc.RpcService/List', + request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.SerializeToString, + response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.FromString, + ) + + +class RpcServiceServicer(object): + # missing associated documentation comment in .proto file + pass + + def Call(self, request, context): + """RPC for invoking a registered function on remote server. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def List(self, request, context): + """RPC for listing available methods in a server. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RpcServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Call': grpc.unary_unary_rpc_method_handler( + servicer.Call, + request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.FromString, + response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.SerializeToString, + ), + 'List': grpc.unary_unary_rpc_method_handler( + servicer.List, + request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.FromString, + response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'tensorflow.rpc.RpcService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b04d0b786602831f83e9260dae1265f447b7d8ba Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..110d8014786b686f0c4f4051233712671d8d0c1d --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/analyzer.py @@ -0,0 +1,107 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""This tool analyzes a TensorFlow Lite graph.""" + +import os + +# pylint: disable=g-import-not-at-top +if not os.path.splitext(__file__)[0].endswith( + os.path.join("tflite_runtime", "analyzer")): + # This file is part of tensorflow package. + from tensorflow.compiler.mlir.lite.python import wrap_converter + from tensorflow.lite.python.analyzer_wrapper import _pywrap_analyzer_wrapper as _analyzer_wrapper + from tensorflow.python.util.tf_export import tf_export as _tf_export +else: + # This file is part of tflite_runtime package. + from tflite_runtime import _pywrap_analyzer_wrapper as _analyzer_wrapper + + def _tf_export(*x, **kwargs): + del x, kwargs + return lambda x: x + + +@_tf_export("lite.experimental.Analyzer") +class ModelAnalyzer(): + """Provides a collection of TFLite model analyzer tools. 
+ + Example: + + ```python + model = tf.keras.applications.MobileNetV3Large() + fb_model = tf.lite.TFLiteConverterV2.from_keras_model(model).convert() + tf.lite.experimental.Analyzer.analyze(model_content=fb_model) + # === TFLite ModelAnalyzer === + # + # Your TFLite model has ‘1’ subgraph(s). In the subgraph description below, + # T# represents the Tensor numbers. For example, in Subgraph#0, the MUL op + # takes tensor #0 and tensor #19 as input and produces tensor #136 as output. + # + # Subgraph#0 main(T#0) -> [T#263] + # Op#0 MUL(T#0, T#19) -> [T#136] + # Op#1 ADD(T#136, T#18) -> [T#137] + # Op#2 CONV_2D(T#137, T#44, T#93) -> [T#138] + # Op#3 HARD_SWISH(T#138) -> [T#139] + # Op#4 DEPTHWISE_CONV_2D(T#139, T#94, T#24) -> [T#140] + # ... + ``` + + WARNING: Experimental interface, subject to change. + """ + + @staticmethod + def analyze(model_path=None, + model_content=None, + gpu_compatibility=False, + **kwargs): + """Analyzes the given tflite_model with dumping model structure. + + This tool provides a way to understand users' TFLite flatbuffer model by + dumping internal graph structure. It also provides additional features + like checking GPU delegate compatibility. + + WARNING: Experimental interface, subject to change. + The output format is not guaranteed to stay stable, so don't + write scripts to this. + + Args: + model_path: TFLite flatbuffer model path. + model_content: TFLite flatbuffer model object. + gpu_compatibility: Whether to check GPU delegate compatibility. + **kwargs: Experimental keyword arguments to analyze API. + + Returns: + Print analyzed report via console output. 
+ """ + if not model_path and not model_content: + raise ValueError("neither `model_path` nor `model_content` is provided") + if model_path: + print(f"=== {model_path} ===\n") + tflite_model = model_path + input_is_filepath = True + else: + print("=== TFLite ModelAnalyzer ===\n") + tflite_model = model_content + input_is_filepath = False + + if kwargs.get("experimental_use_mlir", False): + print( + wrap_converter.wrapped_flat_buffer_file_to_mlir( + tflite_model, input_is_filepath + ) + ) + else: + print( + _analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, + gpu_compatibility)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py new file mode 100644 index 0000000000000000000000000000000000000000..85a460abc866dd51239e550cd96b598569af78c5 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/conversion_metadata_schema_py_generated.py @@ -0,0 +1,568 @@ +import flatbuffers + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ModelType(object): + NONE = 0 + TF_SAVED_MODEL = 1 + KERAS_MODEL = 2 + TF_CONCRETE_FUNCTIONS = 3 + TF_GRAPH_DEF = 4 + TF_SESSION = 5 + JAX = 6 + PYTORCH = 7 + + +class ModelOptimizationMode(object): + PTQ_FLOAT16 = 1001 + PTQ_DYNAMIC_RANGE = 1002 + PTQ_FULL_INTEGER = 1003 + PTQ_INT16 = 1004 + QUANTIZATION_AWARE_TRAINING = 2000 + RANDOM_SPARSITY = 3001 + BLOCK_SPARSITY = 3002 + STRUCTURED_SPARSITY = 3003 + + +class Environment(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Environment() + x.Init(buf, n + offset) + return x + + @classmethod + def 
GetRootAsEnvironment(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + # Environment + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Environment + def TensorflowVersion(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # Environment + def ApiVersion(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + + # Environment + def ModelType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Environment + def ModelHash(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos) + return 0 + +def EnvironmentStart(builder): + builder.StartObject(4) + +def EnvironmentAddTensorflowVersion(builder, tensorflowVersion): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(tensorflowVersion), 0) + +def EnvironmentAddApiVersion(builder, apiVersion): + builder.PrependUint32Slot(1, apiVersion, 0) + +def EnvironmentAddModelType(builder, modelType): + builder.PrependInt32Slot(2, modelType, 0) + +def EnvironmentAddModelHash(builder, modelHash): + builder.PrependUint64Slot(3, modelHash, 0) + +def EnvironmentEnd(builder): + return builder.EndObject() + + + +class EnvironmentT(object): + + # EnvironmentT + def __init__(self): + self.tensorflowVersion = None # type: str + self.apiVersion = 0 # type: int + self.modelType = 0 # type: int + self.modelHash = 0 # type: int + + @classmethod + def InitFromBuf(cls, buf, pos): + environment = Environment() + 
environment.Init(buf, pos) + return cls.InitFromObj(environment) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, environment): + x = EnvironmentT() + x._UnPack(environment) + return x + + # EnvironmentT + def _UnPack(self, environment): + if environment is None: + return + self.tensorflowVersion = environment.TensorflowVersion() + self.apiVersion = environment.ApiVersion() + self.modelType = environment.ModelType() + self.modelHash = environment.ModelHash() + + # EnvironmentT + def Pack(self, builder): + if self.tensorflowVersion is not None: + tensorflowVersion = builder.CreateString(self.tensorflowVersion) + EnvironmentStart(builder) + if self.tensorflowVersion is not None: + EnvironmentAddTensorflowVersion(builder, tensorflowVersion) + EnvironmentAddApiVersion(builder, self.apiVersion) + EnvironmentAddModelType(builder, self.modelType) + EnvironmentAddModelHash(builder, self.modelHash) + environment = EnvironmentEnd(builder) + return environment + + +class SparsityBlockSize(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparsityBlockSize() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsSparsityBlockSize(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + # SparsityBlockSize + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparsityBlockSize + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Uint32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SparsityBlockSize + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint32Flags, o) + return 0 + + # SparsityBlockSize + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityBlockSize + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def SparsityBlockSizeStart(builder): + builder.StartObject(1) + +def SparsityBlockSizeAddValues(builder, values): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) + +def SparsityBlockSizeStartValuesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def SparsityBlockSizeEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class SparsityBlockSizeT(object): + + # SparsityBlockSizeT + def __init__(self): + self.values = None # type: List[int] + + @classmethod + def InitFromBuf(cls, buf, pos): + sparsityBlockSize = SparsityBlockSize() + sparsityBlockSize.Init(buf, pos) + return cls.InitFromObj(sparsityBlockSize) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, sparsityBlockSize): + x = SparsityBlockSizeT() + 
x._UnPack(sparsityBlockSize) + return x + + # SparsityBlockSizeT + def _UnPack(self, sparsityBlockSize): + if sparsityBlockSize is None: + return + if not sparsityBlockSize.ValuesIsNone(): + if np is None: + self.values = [] + for i in range(sparsityBlockSize.ValuesLength()): + self.values.append(sparsityBlockSize.Values(i)) + else: + self.values = sparsityBlockSize.ValuesAsNumpy() + + # SparsityBlockSizeT + def Pack(self, builder): + if self.values is not None: + if np is not None and type(self.values) is np.ndarray: + values = builder.CreateNumpyVector(self.values) + else: + SparsityBlockSizeStartValuesVector(builder, len(self.values)) + for i in reversed(range(len(self.values))): + builder.PrependUint32(self.values[i]) + values = builder.EndVector() + SparsityBlockSizeStart(builder) + if self.values is not None: + SparsityBlockSizeAddValues(builder, values) + sparsityBlockSize = SparsityBlockSizeEnd(builder) + return sparsityBlockSize + + +class ConversionOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConversionOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConversionOptions(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + # ConversionOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConversionOptions + def ModelOptimizationModes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ConversionOptions + def ModelOptimizationModesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ConversionOptions + def ModelOptimizationModesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConversionOptions + def ModelOptimizationModesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # ConversionOptions + def AllowCustomOps(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ConversionOptions + def EnableSelectTfOps(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ConversionOptions + def ForceSelectTfOps(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ConversionOptions + def SparsityBlockSizes(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + x = self._tab.Vector(o) + x += 
flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + obj = SparsityBlockSize() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # ConversionOptions + def SparsityBlockSizesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConversionOptions + def SparsityBlockSizesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + return o == 0 + +def ConversionOptionsStart(builder): + builder.StartObject(5) + +def ConversionOptionsAddModelOptimizationModes(builder, modelOptimizationModes): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(modelOptimizationModes), 0) + +def ConversionOptionsStartModelOptimizationModesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ConversionOptionsAddAllowCustomOps(builder, allowCustomOps): + builder.PrependBoolSlot(1, allowCustomOps, 0) + +def ConversionOptionsAddEnableSelectTfOps(builder, enableSelectTfOps): + builder.PrependBoolSlot(2, enableSelectTfOps, 0) + +def ConversionOptionsAddForceSelectTfOps(builder, forceSelectTfOps): + builder.PrependBoolSlot(3, forceSelectTfOps, 0) + +def ConversionOptionsAddSparsityBlockSizes(builder, sparsityBlockSizes): + builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(sparsityBlockSizes), 0) + +def ConversionOptionsStartSparsityBlockSizesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + +def ConversionOptionsEnd(builder): + return builder.EndObject() + + +try: + from typing import List +except: + pass + +class ConversionOptionsT(object): + + # ConversionOptionsT + def __init__(self): + self.modelOptimizationModes = None # type: List[int] + self.allowCustomOps = False # type: bool + self.enableSelectTfOps = False # type: bool + self.forceSelectTfOps = False # type: bool + self.sparsityBlockSizes = None # 
type: List[SparsityBlockSizeT] + + @classmethod + def InitFromBuf(cls, buf, pos): + conversionOptions = ConversionOptions() + conversionOptions.Init(buf, pos) + return cls.InitFromObj(conversionOptions) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, conversionOptions): + x = ConversionOptionsT() + x._UnPack(conversionOptions) + return x + + # ConversionOptionsT + def _UnPack(self, conversionOptions): + if conversionOptions is None: + return + if not conversionOptions.ModelOptimizationModesIsNone(): + if np is None: + self.modelOptimizationModes = [] + for i in range(conversionOptions.ModelOptimizationModesLength()): + self.modelOptimizationModes.append(conversionOptions.ModelOptimizationModes(i)) + else: + self.modelOptimizationModes = conversionOptions.ModelOptimizationModesAsNumpy() + self.allowCustomOps = conversionOptions.AllowCustomOps() + self.enableSelectTfOps = conversionOptions.EnableSelectTfOps() + self.forceSelectTfOps = conversionOptions.ForceSelectTfOps() + if not conversionOptions.SparsityBlockSizesIsNone(): + self.sparsityBlockSizes = [] + for i in range(conversionOptions.SparsityBlockSizesLength()): + if conversionOptions.SparsityBlockSizes(i) is None: + self.sparsityBlockSizes.append(None) + else: + sparsityBlockSize_ = SparsityBlockSizeT.InitFromObj(conversionOptions.SparsityBlockSizes(i)) + self.sparsityBlockSizes.append(sparsityBlockSize_) + + # ConversionOptionsT + def Pack(self, builder): + if self.modelOptimizationModes is not None: + if np is not None and type(self.modelOptimizationModes) is np.ndarray: + modelOptimizationModes = builder.CreateNumpyVector(self.modelOptimizationModes) + else: + ConversionOptionsStartModelOptimizationModesVector(builder, len(self.modelOptimizationModes)) + for i in reversed(range(len(self.modelOptimizationModes))): + 
builder.PrependInt32(self.modelOptimizationModes[i]) + modelOptimizationModes = builder.EndVector() + if self.sparsityBlockSizes is not None: + sparsityBlockSizeslist = [] + for i in range(len(self.sparsityBlockSizes)): + sparsityBlockSizeslist.append(self.sparsityBlockSizes[i].Pack(builder)) + ConversionOptionsStartSparsityBlockSizesVector(builder, len(self.sparsityBlockSizes)) + for i in reversed(range(len(self.sparsityBlockSizes))): + builder.PrependUOffsetTRelative(sparsityBlockSizeslist[i]) + sparsityBlockSizes = builder.EndVector() + ConversionOptionsStart(builder) + if self.modelOptimizationModes is not None: + ConversionOptionsAddModelOptimizationModes(builder, modelOptimizationModes) + ConversionOptionsAddAllowCustomOps(builder, self.allowCustomOps) + ConversionOptionsAddEnableSelectTfOps(builder, self.enableSelectTfOps) + ConversionOptionsAddForceSelectTfOps(builder, self.forceSelectTfOps) + if self.sparsityBlockSizes is not None: + ConversionOptionsAddSparsityBlockSizes(builder, sparsityBlockSizes) + conversionOptions = ConversionOptionsEnd(builder) + return conversionOptions + + +class ConversionMetadata(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConversionMetadata() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsConversionMetadata(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + # ConversionMetadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConversionMetadata + def Environment(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + obj = Environment() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # ConversionMetadata + def Options(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + x = self._tab.Indirect(o + self._tab.Pos) + obj = ConversionOptions() + obj.Init(self._tab.Bytes, x) + return obj + return None + +def ConversionMetadataStart(builder): + builder.StartObject(2) + +def ConversionMetadataAddEnvironment(builder, environment): + builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(environment), 0) + +def ConversionMetadataAddOptions(builder, options): + builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(options), 0) + +def ConversionMetadataEnd(builder): + return builder.EndObject() + + +try: + from typing import Optional +except: + pass + +class ConversionMetadataT(object): + + # ConversionMetadataT + def __init__(self): + self.environment = None # type: Optional[EnvironmentT] + self.options = None # type: Optional[ConversionOptionsT] + + @classmethod + def InitFromBuf(cls, buf, pos): + conversionMetadata = ConversionMetadata() + conversionMetadata.Init(buf, pos) + return cls.InitFromObj(conversionMetadata) + + @classmethod + def InitFromPackedBuf(cls, buf, pos=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos) + return cls.InitFromBuf(buf, pos+n) + + @classmethod + def InitFromObj(cls, conversionMetadata): + x = ConversionMetadataT() + x._UnPack(conversionMetadata) + return x + + # ConversionMetadataT + def _UnPack(self, conversionMetadata): + if conversionMetadata is None: + return + if 
conversionMetadata.Environment() is not None: + self.environment = EnvironmentT.InitFromObj(conversionMetadata.Environment()) + if conversionMetadata.Options() is not None: + self.options = ConversionOptionsT.InitFromObj(conversionMetadata.Options()) + + # ConversionMetadataT + def Pack(self, builder): + if self.environment is not None: + environment = self.environment.Pack(builder) + if self.options is not None: + options = self.options.Pack(builder) + ConversionMetadataStart(builder) + if self.environment is not None: + ConversionMetadataAddEnvironment(builder, environment) + if self.options is not None: + ConversionMetadataAddOptions(builder, options) + conversionMetadata = ConversionMetadataEnd(builder) + return conversionMetadata + + diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py new file mode 100644 index 0000000000000000000000000000000000000000..1f123698e7ab1851904bc8344110628ad205b3f4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py @@ -0,0 +1,219 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utilities for collecting TFLite metrics.""" + +import collections +import enum +import functools +from typing import Text + +from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2 +from tensorflow.lite.python.metrics import metrics + + +class Component(enum.Enum): + """Enum class defining name of the converter components.""" + # Validate the given input and prepare and optimize TensorFlow Model. + PREPARE_TF_MODEL = "PREPARE_TF_MODEL" + + # Convert to TFLite model format. + CONVERT_TF_TO_TFLITE_MODEL = "CONVERT_TF_TO_TFLITE_MODEL" + + # RUN quantization and sparsification. + OPTIMIZE_TFLITE_MODEL = "OPTIMIZE_TFLITE_MODEL" + + +SubComponentItem = collections.namedtuple("SubComponentItem", + ["name", "component"]) + + +class SubComponent(SubComponentItem, enum.Enum): + """Enum class defining name of the converter subcomponents. + + This enum only defines the subcomponents in Python, there might be more + subcomponents defined in C++. + """ + + def __str__(self): + return self.value.name + + @property + def name(self): + return self.value.name + + @property + def component(self): + return self.value.component + + # The subcomponent name is unspecified. + UNSPECIFIED = SubComponentItem("UNSPECIFIED", None) + + # Valid the given input and parameters. + VALIDATE_INPUTS = SubComponentItem("VALIDATE_INPUTS", + Component.PREPARE_TF_MODEL) + + # Load GraphDef from SavedModel. + LOAD_SAVED_MODEL = SubComponentItem("LOAD_SAVED_MODEL", + Component.PREPARE_TF_MODEL) + + # Convert a SavedModel to frozen graph. + FREEZE_SAVED_MODEL = SubComponentItem("FREEZE_SAVED_MODEL", + Component.PREPARE_TF_MODEL) + + # Save a Keras model to SavedModel. + CONVERT_KERAS_TO_SAVED_MODEL = SubComponentItem( + "CONVERT_KERAS_TO_SAVED_MODEL", Component.PREPARE_TF_MODEL) + + # Save Concrete functions to SavedModel. 
+ CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL = SubComponentItem( + "CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL", Component.PREPARE_TF_MODEL) + + # Convert a Keras model to a frozen graph. + FREEZE_KERAS_MODEL = SubComponentItem("FREEZE_KERAS_MODEL", + Component.PREPARE_TF_MODEL) + + # Replace all the variables with constants in a ConcreteFunction. + FREEZE_CONCRETE_FUNCTION = SubComponentItem("FREEZE_CONCRETE_FUNCTION", + Component.PREPARE_TF_MODEL) + + # Run grappler optimization. + OPTIMIZE_TF_MODEL = SubComponentItem("OPTIMIZE_TF_MODEL", + Component.PREPARE_TF_MODEL) + + # Convert using the old TOCO converter. + CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER = SubComponentItem( + "CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER", + Component.CONVERT_TF_TO_TFLITE_MODEL) + + # Convert a GraphDef to TFLite model. + CONVERT_GRAPHDEF = SubComponentItem("CONVERT_GRAPHDEF", + Component.CONVERT_TF_TO_TFLITE_MODEL) + + # Convert a SavedModel to TFLite model. + CONVERT_SAVED_MODEL = SubComponentItem("CONVERT_SAVED_MODEL", + Component.CONVERT_TF_TO_TFLITE_MODEL) + + # Convert a Jax HLO to TFLite model. + CONVERT_JAX_HLO = SubComponentItem("CONVERT_JAX_HLO", + Component.CONVERT_TF_TO_TFLITE_MODEL) + + # Do quantization by the deprecated quantizer. + QUANTIZE_USING_DEPRECATED_QUANTIZER = SubComponentItem( + "QUANTIZE_USING_DEPRECATED_QUANTIZER", Component.OPTIMIZE_TFLITE_MODEL) + + # Do calibration. + CALIBRATE = SubComponentItem("CALIBRATE", Component.OPTIMIZE_TFLITE_MODEL) + + # Do quantization by MLIR. + QUANTIZE = SubComponentItem("QUANTIZE", Component.OPTIMIZE_TFLITE_MODEL) + + # Do sparsification by MLIR. 
+ SPARSIFY = SubComponentItem("SPARSIFY", Component.OPTIMIZE_TFLITE_MODEL) + + +class ConverterError(Exception): + """Raised when an error occurs during model conversion.""" + + def __init__(self, message): + super(ConverterError, self).__init__(message) + self.errors = [] + self._parse_error_message(message) + + def append_error(self, + error_data: converter_error_data_pb2.ConverterErrorData): + self.errors.append(error_data) + + def _parse_error_message(self, message): + """If the message matches a pattern, assigns the associated error code. + + It is difficult to assign an error code to some errrors in MLIR side, Ex: + errors thrown by other components than TFLite or not using mlir::emitError. + This function try to detect them by the error message and assign the + corresponding error code. + + Args: + message: The error message of this exception. + """ + error_code_mapping = { + "Failed to functionalize Control Flow V1 ops. Consider using Control " + "Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/" + "tf/compat/v1/enable_control_flow_v2.": + converter_error_data_pb2.ConverterErrorData + .ERROR_UNSUPPORTED_CONTROL_FLOW_V1, + } + for pattern, error_code in error_code_mapping.items(): + if pattern in message: + error_data = converter_error_data_pb2.ConverterErrorData() + error_data.error_message = message + error_data.error_code = error_code + self.append_error(error_data) + return + + +def convert_phase(component, subcomponent=SubComponent.UNSPECIFIED): + """The decorator to identify converter component and subcomponent. + + Args: + component: Converter component name. + subcomponent: Converter subcomponent name. + + Returns: + Forward the result from the wrapped function. + + Raises: + ValueError: if component and subcomponent name is not valid. 
+ """ + if component not in Component: + raise ValueError("Given component name not found") + if subcomponent not in SubComponent: + raise ValueError("Given subcomponent name not found") + if (subcomponent != SubComponent.UNSPECIFIED and + subcomponent.component != component): + raise ValueError("component and subcomponent name don't match") + + def report_error(error_data: converter_error_data_pb2.ConverterErrorData): + # Always overwrites the component information, but only overwrites the + # subcomponent if it is not available. + error_data.component = component.value + if not error_data.subcomponent: + error_data.subcomponent = subcomponent.name + tflite_metrics = metrics.TFLiteConverterMetrics() + tflite_metrics.set_converter_error(error_data) + + def report_error_message(error_message: Text): + error_data = converter_error_data_pb2.ConverterErrorData() + error_data.error_message = error_message + report_error(error_data) + + def actual_decorator(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except ConverterError as converter_error: + if converter_error.errors: + for error_data in converter_error.errors: + report_error(error_data) + else: + report_error_message(str(converter_error)) + raise converter_error from None # Re-throws the exception. + except Exception as error: + report_error_message(str(error)) + raise error from None # Re-throws the exception. + + return wrapper + + return actual_decorator diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e898a47318d38a388b8ca661bef89dda53222593 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/schema_util.py @@ -0,0 +1,45 @@ +# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Schema utilities to get builtin code from operator code.""" + +from tensorflow.python.util import all_util + + +def get_builtin_code_from_operator_code(opcode): + """Return the builtin code of the given operator code. + + The following method is introduced to resolve op builtin code shortage + problem. The new builtin operator will be assigned to the extended builtin + code field in the flatbuffer schema. Those methods helps to hide builtin code + details. + + Args: + opcode: Operator code. + + Returns: + The builtin code of the given operator code. + """ + # Access BuiltinCode() method first if available. 
+ if hasattr(opcode, 'BuiltinCode') and callable(opcode.BuiltinCode): + return max(opcode.BuiltinCode(), opcode.DeprecatedBuiltinCode()) + + return max(opcode.builtinCode, opcode.deprecatedBuiltinCode) + + +_allowed_symbols = [ + 'get_builtin_code_from_operator_code', +] + +all_util.remove_undocumented(__name__, _allowed_symbols) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..653a23ded0c8dcfe52b4c5a56ded6c6dc8c1f9df --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/tflite_convert.py @@ -0,0 +1,696 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Python command line interface for converting TF models to TFLite models.""" + +import argparse +import os +import sys +import warnings + +from absl import app +import tensorflow as tf + +from tensorflow.lite.python import lite +from tensorflow.lite.python.convert import register_custom_opdefs +from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2 +from tensorflow.lite.toco.logging import gen_html +from tensorflow.python import tf2 +from tensorflow.python.framework import dtypes +from tensorflow.python.platform import gfile +from tensorflow.python.util import keras_deps + +# Needed to enable TF2 by default. + +_ = tf.keras.models.save_model # ensure necessary imports are executed + + +def _parse_array(values, type_fn=str): + if values is not None: + return [type_fn(val) for val in values.split(",") if val] + return None + + +def _parse_set(values): + if values is not None: + return set([item for item in values.split(",") if item]) + return None + + +def _parse_inference_type(value, flag): + """Converts the inference type to the value of the constant. + + Args: + value: str representing the inference type. + flag: str representing the flag name. + + Returns: + tf.dtype. + + Raises: + ValueError: Unsupported value. + """ + if value == "FLOAT": + return dtypes.float32 + if value == "INT8": + return dtypes.int8 + if value == "UINT8" or value == "QUANTIZED_UINT8": + return dtypes.uint8 + raise ValueError( + "Unsupported value for `{}` flag. Expected FLOAT, INT8, UINT8, or " + "QUANTIZED_UINT8 instead got {}.".format(flag, value)) + + +class _ParseBooleanFlag(argparse.Action): + """Helper class to parse boolean flag that optionally accepts truth value.""" + + def __init__(self, option_strings, dest, nargs=None, **kwargs): + if nargs != "?": + # This should never happen. This class is only used once below with + # nargs="?". 
+ raise ValueError( + "This parser only supports nargs='?' (0 or 1 additional arguments)") + super(_ParseBooleanFlag, self).__init__( + option_strings, dest, nargs=nargs, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + if values is None: + # Handling `--boolean_flag`. + # Without additional arguments, it implies true. + flag_value = True + elif values.lower() == "true": + # Handling `--boolean_flag=true`. + # (Case insensitive after the equal sign) + flag_value = True + elif values.lower() == "false": + # Handling `--boolean_flag=false`. + # (Case insensitive after the equal sign) + flag_value = False + else: + raise ValueError("Invalid argument to --{}. Must use flag alone," + " or specify true/false.".format(self.dest)) + setattr(namespace, self.dest, flag_value) + + +def _get_tflite_converter(flags): + """Makes a TFLiteConverter object based on the flags provided. + + Args: + flags: argparse.Namespace object containing TFLite flags. + + Returns: + TFLiteConverter object. + + Raises: + ValueError: Invalid flags. + """ + # Parse input and output arrays. + input_arrays = _parse_array(flags.input_arrays) + input_shapes = None + if flags.input_shapes: + input_shapes_list = [ + _parse_array(shape, type_fn=int) + for shape in flags.input_shapes.split(":") + ] + input_shapes = dict(list(zip(input_arrays, input_shapes_list))) + output_arrays = _parse_array(flags.output_arrays) + + converter_kwargs = { + "input_arrays": input_arrays, + "input_shapes": input_shapes, + "output_arrays": output_arrays + } + + # Create TFLiteConverter. 
+ if flags.graph_def_file: + converter_fn = lite.TFLiteConverter.from_frozen_graph + converter_kwargs["graph_def_file"] = flags.graph_def_file + elif flags.saved_model_dir: + converter_fn = lite.TFLiteConverter.from_saved_model + converter_kwargs["saved_model_dir"] = flags.saved_model_dir + converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set) + converter_kwargs["signature_key"] = flags.saved_model_signature_key + elif flags.keras_model_file: + converter_fn = lite.TFLiteConverter.from_keras_model_file + converter_kwargs["model_file"] = flags.keras_model_file + else: + raise ValueError("--graph_def_file, --saved_model_dir, or " + "--keras_model_file must be specified.") + + return converter_fn(**converter_kwargs) + + +def _convert_tf1_model(flags): + """Calls function to convert the TensorFlow 1.X model into a TFLite model. + + Args: + flags: argparse.Namespace object. + + Raises: + ValueError: Invalid flags. + """ + # Register custom opdefs before converter object creation. + if flags.custom_opdefs: + register_custom_opdefs(_parse_array(flags.custom_opdefs)) + + # Create converter. + converter = _get_tflite_converter(flags) + if flags.inference_type: + converter.inference_type = _parse_inference_type(flags.inference_type, + "inference_type") + if flags.inference_input_type: + converter.inference_input_type = _parse_inference_type( + flags.inference_input_type, "inference_input_type") + if flags.output_format: + converter.output_format = _toco_flags_pb2.FileFormat.Value( + flags.output_format) + + if flags.mean_values and flags.std_dev_values: + input_arrays = converter.get_input_arrays() + std_dev_values = _parse_array(flags.std_dev_values, type_fn=float) + + # In quantized inference, mean_value has to be integer so that the real + # value 0.0 is exactly representable. 
+ if converter.inference_type == dtypes.float32: + mean_values = _parse_array(flags.mean_values, type_fn=float) + else: + mean_values = _parse_array(flags.mean_values, type_fn=int) + quant_stats = list(zip(mean_values, std_dev_values)) + if ((not flags.input_arrays and len(input_arrays) > 1) or + (len(input_arrays) != len(quant_stats))): + raise ValueError("Mismatching --input_arrays, --std_dev_values, and " + "--mean_values. The flags must have the same number of " + "items. The current input arrays are '{0}'. " + "--input_arrays must be present when specifying " + "--std_dev_values and --mean_values with multiple input " + "tensors in order to map between names and " + "values.".format(",".join(input_arrays))) + converter.quantized_input_stats = dict(list(zip(input_arrays, quant_stats))) + if (flags.default_ranges_min is not None) and (flags.default_ranges_max is + not None): + converter.default_ranges_stats = (flags.default_ranges_min, + flags.default_ranges_max) + + if flags.drop_control_dependency: + converter.drop_control_dependency = flags.drop_control_dependency + if flags.reorder_across_fake_quant: + converter.reorder_across_fake_quant = flags.reorder_across_fake_quant + if flags.change_concat_input_ranges: + converter.change_concat_input_ranges = ( + flags.change_concat_input_ranges == "TRUE") + + if flags.allow_custom_ops: + converter.allow_custom_ops = flags.allow_custom_ops + + if flags.target_ops: + ops_set_options = lite.OpsSet.get_options() + converter.target_spec.supported_ops = set() + for option in flags.target_ops.split(","): + if option not in ops_set_options: + raise ValueError("Invalid value for --target_ops. 
Options: " + "{0}".format(",".join(ops_set_options))) + converter.target_spec.supported_ops.add(lite.OpsSet(option)) + + if flags.experimental_select_user_tf_ops: + if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops: + raise ValueError("--experimental_select_user_tf_ops can only be set if " + "--target_ops contains SELECT_TF_OPS.") + user_op_set = set() + for op_name in flags.experimental_select_user_tf_ops.split(","): + user_op_set.add(op_name) + converter.target_spec.experimental_select_user_tf_ops = list(user_op_set) + + if flags.post_training_quantize: + converter.optimizations = [lite.Optimize.DEFAULT] + if converter.inference_type != dtypes.float32: + print("--post_training_quantize quantizes a graph of inference_type " + "FLOAT. Overriding inference_type to FLOAT.") + converter.inference_type = dtypes.float32 + + if flags.quantize_to_float16: + converter.target_spec.supported_types = [dtypes.float16] + if not flags.post_training_quantize: + print("--quantize_to_float16 will only take effect with the " + "--post_training_quantize flag enabled.") + + if flags.dump_graphviz_dir: + converter.dump_graphviz_dir = flags.dump_graphviz_dir + if flags.dump_graphviz_video: + converter.dump_graphviz_vode = flags.dump_graphviz_video + if flags.conversion_summary_dir: + converter.conversion_summary_dir = flags.conversion_summary_dir + + converter.experimental_new_converter = flags.experimental_new_converter + + if flags.experimental_new_quantizer is not None: + converter.experimental_new_quantizer = flags.experimental_new_quantizer + + # Convert model. + output_data = converter.convert() + with gfile.GFile(flags.output_file, "wb") as f: + f.write(output_data) + + +def _convert_tf2_model(flags): + """Calls function to convert the TensorFlow 2.0 model into a TFLite model. + + Args: + flags: argparse.Namespace object. + + Raises: + ValueError: Unsupported file format. + """ + # Load the model. 
+ if flags.saved_model_dir: + converter = lite.TFLiteConverterV2.from_saved_model( + flags.saved_model_dir, + signature_keys=_parse_array(flags.saved_model_signature_key), + tags=_parse_set(flags.saved_model_tag_set)) + elif flags.keras_model_file: + model = keras_deps.get_load_model_function()(flags.keras_model_file) + converter = lite.TFLiteConverterV2.from_keras_model(model) + + converter.experimental_new_converter = flags.experimental_new_converter + + if flags.experimental_new_quantizer is not None: + converter.experimental_new_quantizer = flags.experimental_new_quantizer + + # Convert the model. + tflite_model = converter.convert() + with gfile.GFile(flags.output_file, "wb") as f: + f.write(tflite_model) + + +def _check_tf1_flags(flags, unparsed): + """Checks the parsed and unparsed flags to ensure they are valid in 1.X. + + Raises an error if previously support unparsed flags are found. Raises an + error for parsed flags that don't meet the required conditions. + + Args: + flags: argparse.Namespace object containing TFLite flags. + unparsed: List of unparsed flags. + + Raises: + ValueError: Invalid flags. + """ + + # Check unparsed flags for common mistakes based on previous TOCO. + def _get_message_unparsed(flag, orig_flag, new_flag): + if flag.startswith(orig_flag): + return "\n Use {0} instead of {1}".format(new_flag, orig_flag) + return "" + + if unparsed: + output = "" + for flag in unparsed: + output += _get_message_unparsed(flag, "--input_file", "--graph_def_file") + output += _get_message_unparsed(flag, "--savedmodel_directory", + "--saved_model_dir") + output += _get_message_unparsed(flag, "--std_value", "--std_dev_values") + output += _get_message_unparsed(flag, "--batch_size", "--input_shapes") + output += _get_message_unparsed(flag, "--dump_graphviz", + "--dump_graphviz_dir") + if output: + raise ValueError(output) + + # Check that flags are valid. 
+ if flags.graph_def_file and (not flags.input_arrays or + not flags.output_arrays): + raise ValueError("--input_arrays and --output_arrays are required with " + "--graph_def_file") + + if flags.input_shapes: + if not flags.input_arrays: + raise ValueError("--input_shapes must be used with --input_arrays") + if flags.input_shapes.count(":") != flags.input_arrays.count(","): + raise ValueError("--input_shapes and --input_arrays must have the same " + "number of items") + + if flags.std_dev_values or flags.mean_values: + if bool(flags.std_dev_values) != bool(flags.mean_values): + raise ValueError("--std_dev_values and --mean_values must be used " + "together") + if flags.std_dev_values.count(",") != flags.mean_values.count(","): + raise ValueError("--std_dev_values, --mean_values must have the same " + "number of items") + + if (flags.default_ranges_min is None) != (flags.default_ranges_max is None): + raise ValueError("--default_ranges_min and --default_ranges_max must be " + "used together") + + if flags.dump_graphviz_video and not flags.dump_graphviz_dir: + raise ValueError("--dump_graphviz_video must be used with " + "--dump_graphviz_dir") + + if flags.custom_opdefs and not flags.experimental_new_converter: + raise ValueError("--custom_opdefs must be used with " + "--experimental_new_converter") + if flags.custom_opdefs and not flags.allow_custom_ops: + raise ValueError("--custom_opdefs must be used with --allow_custom_ops") + if (flags.experimental_select_user_tf_ops and + not flags.experimental_new_converter): + raise ValueError("--experimental_select_user_tf_ops must be used with " + "--experimental_new_converter") + + +def _check_tf2_flags(flags): + """Checks the parsed and unparsed flags to ensure they are valid in 2.X. + + Args: + flags: argparse.Namespace object containing TFLite flags. + + Raises: + ValueError: Invalid flags. 
+ """ + if not flags.keras_model_file and not flags.saved_model_dir: + raise ValueError("one of the arguments --saved_model_dir " + "--keras_model_file is required") + + +def _get_tf1_flags(parser): + """Returns ArgumentParser for tflite_convert for TensorFlow 1.X. + + Args: + parser: ArgumentParser + """ + # Input file flags. + input_file_group = parser.add_mutually_exclusive_group(required=True) + input_file_group.add_argument( + "--graph_def_file", + type=str, + help="Full filepath of file containing frozen TensorFlow GraphDef.") + input_file_group.add_argument( + "--saved_model_dir", + type=str, + help="Full filepath of directory containing the SavedModel.") + input_file_group.add_argument( + "--keras_model_file", + type=str, + help="Full filepath of HDF5 file containing tf.Keras model.") + + # Model format flags. + parser.add_argument( + "--output_format", + type=str.upper, + choices=["TFLITE", "GRAPHVIZ_DOT"], + help="Output file format.") + parser.add_argument( + "--inference_type", + type=str.upper, + default="FLOAT", + help=("Target data type of real-number arrays in the output file. " + "Must be either FLOAT, INT8 or UINT8.")) + parser.add_argument( + "--inference_input_type", + type=str.upper, + help=("Target data type of real-number input arrays. Allows for a " + "different type for input arrays in the case of quantization. " + "Must be either FLOAT, INT8 or UINT8.")) + + # Input and output arrays flags. + parser.add_argument( + "--input_arrays", + type=str, + help="Names of the input arrays, comma-separated.") + parser.add_argument( + "--input_shapes", + type=str, + help="Shapes corresponding to --input_arrays, colon-separated.") + parser.add_argument( + "--output_arrays", + type=str, + help="Names of the output arrays, comma-separated.") + + # SavedModel related flags. + parser.add_argument( + "--saved_model_tag_set", + type=str, + help=("Comma-separated set of tags identifying the MetaGraphDef within " + "the SavedModel to analyze. 
All tags must be present. In order to " + "pass in an empty tag set, pass in \"\". (default \"serve\")")) + parser.add_argument( + "--saved_model_signature_key", + type=str, + help=("Key identifying the SignatureDef containing inputs and outputs. " + "(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)")) + + # Quantization flags. + parser.add_argument( + "--std_dev_values", + type=str, + help=("Standard deviation of training data for each input tensor, " + "comma-separated floats. Used for quantized input tensors. " + "(default None)")) + parser.add_argument( + "--mean_values", + type=str, + help=("Mean of training data for each input tensor, comma-separated " + "floats. Used for quantized input tensors. (default None)")) + parser.add_argument( + "--default_ranges_min", + type=float, + help=("Default value for min bound of min/max range values used for all " + "arrays without a specified range, Intended for experimenting with " + "quantization via \"dummy quantization\". (default None)")) + parser.add_argument( + "--default_ranges_max", + type=float, + help=("Default value for max bound of min/max range values used for all " + "arrays without a specified range, Intended for experimenting with " + "quantization via \"dummy quantization\". (default None)")) + # quantize_weights is DEPRECATED. + parser.add_argument( + "--quantize_weights", + dest="post_training_quantize", + action="store_true", + help=argparse.SUPPRESS) + parser.add_argument( + "--post_training_quantize", + dest="post_training_quantize", + action="store_true", + help=( + "Boolean indicating whether to quantize the weights of the " + "converted float model. Model size will be reduced and there will " + "be latency improvements (at the cost of accuracy). 
(default False)")) + parser.add_argument( + "--quantize_to_float16", + dest="quantize_to_float16", + action="store_true", + help=("Boolean indicating whether to quantize weights to fp16 instead of " + "the default int8 when post-training quantization " + "(--post_training_quantize) is enabled. (default False)")) + # Graph manipulation flags. + parser.add_argument( + "--drop_control_dependency", + action="store_true", + help=("Boolean indicating whether to drop control dependencies silently. " + "This is due to TensorFlow not supporting control dependencies. " + "(default True)")) + parser.add_argument( + "--reorder_across_fake_quant", + action="store_true", + help=("Boolean indicating whether to reorder FakeQuant nodes in " + "unexpected locations. Used when the location of the FakeQuant " + "nodes is preventing graph transformations necessary to convert " + "the graph. Results in a graph that differs from the quantized " + "training graph, potentially causing differing arithmetic " + "behavior. (default False)")) + # Usage for this flag is --change_concat_input_ranges=true or + # --change_concat_input_ranges=false in order to make it clear what the flag + # is set to. This keeps the usage consistent with other usages of the flag + # where the default is different. The default value here is False. + parser.add_argument( + "--change_concat_input_ranges", + type=str.upper, + choices=["TRUE", "FALSE"], + help=("Boolean to change behavior of min/max ranges for inputs and " + "outputs of the concat operator for quantized models. Changes the " + "ranges of concat operator overlap when true. (default False)")) + + # Permitted ops flags. + parser.add_argument( + "--allow_custom_ops", + action=_ParseBooleanFlag, + nargs="?", + help=("Boolean indicating whether to allow custom operations. When false " + "any unknown operation is an error. When true, custom ops are " + "created for any op that is unknown. 
The developer will need to " + "provide these to the TensorFlow Lite runtime with a custom " + "resolver. (default False)")) + parser.add_argument( + "--custom_opdefs", + type=str, + help=("String representing a list of custom ops OpDefs delineated with " + "commas that are included in the GraphDef. Required when using " + "custom operations with --experimental_new_converter.")) + parser.add_argument( + "--target_ops", + type=str, + help=("Experimental flag, subject to change. Set of OpsSet options " + "indicating which converter to use. Options: {0}. One or more " + "option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))" + "".format(",".join(lite.OpsSet.get_options())))) + parser.add_argument( + "--experimental_select_user_tf_ops", + type=str, + help=("Experimental flag, subject to change. Comma separated list of " + "user's defined TensorFlow operators required in the runtime.")) + + # Logging flags. + parser.add_argument( + "--dump_graphviz_dir", + type=str, + help=("Full filepath of folder to dump the graphs at various stages of " + "processing GraphViz .dot files. Preferred over --output_format=" + "GRAPHVIZ_DOT in order to keep the requirements of the output " + "file.")) + parser.add_argument( + "--dump_graphviz_video", + action="store_true", + help=("Boolean indicating whether to dump the graph after every graph " + "transformation")) + parser.add_argument( + "--conversion_summary_dir", + type=str, + help=("Full filepath to store the conversion logs, which includes " + "graphviz of the model before/after the conversion, an HTML report " + "and the conversion proto buffers. This will only be generated " + "when passing --experimental_new_converter")) + + +def _get_tf2_flags(parser): + """Returns ArgumentParser for tflite_convert for TensorFlow 2.0. + + Args: + parser: ArgumentParser + """ + # Input file flags. 
+ input_file_group = parser.add_mutually_exclusive_group() + input_file_group.add_argument( + "--saved_model_dir", + type=str, + help="Full path of the directory containing the SavedModel.") + input_file_group.add_argument( + "--keras_model_file", + type=str, + help="Full filepath of HDF5 file containing tf.Keras model.") + # SavedModel related flags. + parser.add_argument( + "--saved_model_tag_set", + type=str, + help=("Comma-separated set of tags identifying the MetaGraphDef within " + "the SavedModel to analyze. All tags must be present. In order to " + "pass in an empty tag set, pass in \"\". (default \"serve\")")) + parser.add_argument( + "--saved_model_signature_key", + type=str, + help=("Key identifying the SignatureDef containing inputs and outputs. " + "(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)")) + + # Enables 1.X converter in 2.X. + parser.add_argument( + "--enable_v1_converter", + action="store_true", + help=("Enables the TensorFlow V1 converter in 2.0")) + + +def _get_parser(use_v2_converter): + """Returns an ArgumentParser for tflite_convert. + + Args: + use_v2_converter: Indicates which converter to return. + Return: ArgumentParser. + """ + parser = argparse.ArgumentParser( + description=("Command line tool to run TensorFlow Lite Converter.")) + + # Output file flag. + parser.add_argument( + "--output_file", + type=str, + help="Full filepath of the output file.", + required=True) + + if use_v2_converter: + _get_tf2_flags(parser) + else: + _get_tf1_flags(parser) + + parser.add_argument( + "--experimental_new_converter", + action=_ParseBooleanFlag, + nargs="?", + default=True, + help=("Experimental flag, subject to change. Enables MLIR-based " + "conversion instead of TOCO conversion. (default True)")) + + parser.add_argument( + "--experimental_new_quantizer", + action=_ParseBooleanFlag, + nargs="?", + help=("Experimental flag, subject to change. Enables MLIR-based " + "quantizer instead of flatbuffer conversion. 
(default True)")) + return parser + + +def run_main(_): + """Main in tflite_convert.py.""" + use_v2_converter = tf2.enabled() + parser = _get_parser(use_v2_converter) + tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:]) + + # If the user is running TensorFlow 2.X but has passed in enable_v1_converter + # then parse the flags again with the 1.X converter flags. + if tf2.enabled() and tflite_flags.enable_v1_converter: + use_v2_converter = False + parser = _get_parser(use_v2_converter) + tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:]) + + # Checks if the flags are valid. + try: + if use_v2_converter: + _check_tf2_flags(tflite_flags) + else: + _check_tf1_flags(tflite_flags, unparsed) + except ValueError as e: + parser.print_usage() + file_name = os.path.basename(sys.argv[0]) + sys.stderr.write("{0}: error: {1}\n".format(file_name, str(e))) + sys.exit(1) + + # Convert the model according to the user provided flag. + if use_v2_converter: + _convert_tf2_model(tflite_flags) + else: + try: + _convert_tf1_model(tflite_flags) + finally: + if tflite_flags.conversion_summary_dir: + if tflite_flags.experimental_new_converter: + gen_html.gen_conversion_log_html(tflite_flags.conversion_summary_dir, + tflite_flags.post_training_quantize, + tflite_flags.output_file) + else: + warnings.warn( + "Conversion summary will only be generated when enabling" + " the new converter via --experimental_new_converter. 
") + + +def main(): + app.run(main=run_main, argv=sys.argv[:1]) + + +if __name__ == "__main__": + main() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py new file mode 100644 index 0000000000000000000000000000000000000000..c0692655c3f127804bfa10fe96d746531569998a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/python/util.py @@ -0,0 +1,1177 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Functions used by multiple converter files.""" + +import copy +import datetime +import sys + +from absl import logging +import flatbuffers +import numpy as np + +from tensorflow.core.protobuf import config_pb2 as _config_pb2 +from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2 +from tensorflow.lite.python import conversion_metadata_schema_py_generated as conversion_metadata_fb +from tensorflow.lite.python import schema_py_generated as schema_fb +from tensorflow.lite.python import schema_util +from tensorflow.lite.python import tflite_keras_util as _tflite_keras_util +from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs +from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes +from tensorflow.lite.tools import flatbuffer_utils +from tensorflow.python.eager import function +from tensorflow.python.framework import convert_to_constants as _convert_to_constants +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import error_interpolation as _error_interpolation +from tensorflow.python.grappler import tf_optimizer +from tensorflow.python.training.saver import export_meta_graph as _export_meta_graph + +# The field name of conversion metadata in the flatbuffer file. 
+CONVERSION_METADATA_FIELD_NAME = "CONVERSION_METADATA" + +# Keras functions used by TFLite +model_input_signature = _tflite_keras_util.model_input_signature +trace_model_call = _tflite_keras_util.trace_model_call +get_save_spec = _tflite_keras_util.get_save_spec + +# Jax functions used by TFLite +# pylint: disable=g-import-not-at-top +# pylint: disable=unused-import +try: + from jax import jit as _jit +except ImportError: + _jit = None +# pylint: enable=g-import-not-at-top +# pylint: enable=unused-import + +# Defined as per TFLite schema +_MAP_TFLITE_ENUM_TO_TF_TYPES = { + 0: dtypes.float32, + 1: dtypes.float16, + 2: dtypes.int32, + 3: dtypes.uint8, + 4: dtypes.int64, + 5: dtypes.string, + 6: dtypes.bool, + 7: dtypes.int16, + 8: dtypes.complex64, + 9: dtypes.int8, + 10: dtypes.float64, + 11: dtypes.complex128, + 16: dtypes.uint32, +} + +_TFLITE_FILE_IDENTIFIER = b"TFL3" + +_MAP_QUANT_TO_IO_TYPES = { + dtypes.int8: {dtypes.int8, dtypes.uint8}, + dtypes.int16: {dtypes.int16}, +} + + +def _convert_tflite_enum_type_to_tf_type(tflite_enum_type): + """Converts tflite enum type (eg: 0) to tf type (eg: tf.float32). + + Args: + tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32) + + Raises: + ValueError: If an invalid tflite enum type is provided. + + Returns: + tf type (eg: tf.float32) + """ + tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type) + if tf_type is None: + raise ValueError( + "Unsupported enum {}. The valid map of enum to tf types is : {}" + .format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES)) + return tf_type + + +def get_tf_type_name(tf_type): + """Converts tf.dtype (eg: tf.float32) to str (eg: "tf.float32").""" + return "tf." + tf_type.name if tf_type else None + + +def get_tensor_name(tensor): + """Returns name of the input tensor. + + Args: + tensor: tf.Tensor + + Returns: + str + """ + parts = tensor.name.split(":") + if len(parts) > 2: + raise ValueError("Tensor name invalid. 
Expect 0 or 1 colon, got {0}".format( + len(parts) - 1)) + + # To be consistent with the tensor naming scheme in tensorflow, we need + # drop the ':0' suffix for the first tensor. + if len(parts) > 1 and parts[1] != "0": + return tensor.name + return parts[0] + + +def get_tensors_from_tensor_names(graph, tensor_names): + """Gets the Tensors associated with the `tensor_names` in the provided graph. + + Args: + graph: TensorFlow Graph. + tensor_names: List of strings that represent names of tensors in the graph. + + Returns: + A list of Tensor objects in the same order the names are provided. + + Raises: + ValueError: + tensor_names contains an invalid tensor name. + """ + # Get the list of all of the tensors. + tensor_name_to_tensor = {} + for op in graph.get_operations(): + for tensor in op.values(): + tensor_name_to_tensor[get_tensor_name(tensor)] = tensor + + # Get the tensors associated with tensor_names. + tensors = [] + invalid_tensors = [] + for name in tensor_names: + if not isinstance(name, str): + raise ValueError("Invalid type for a tensor name in the provided graph. " + "Expected type for a tensor name is 'str', instead got " + "type '{}' for tensor name '{}'".format( + type(name), name)) + + tensor = tensor_name_to_tensor.get(name) + if tensor is None: + invalid_tensors.append(name) + else: + tensors.append(tensor) + + # Throw ValueError if any user input names are not valid tensors. + if invalid_tensors: + raise ValueError("Invalid tensors '{}' were found.".format( + ",".join(invalid_tensors))) + return tensors + + +def set_tensor_shapes(tensors, shapes): + """Sets Tensor shape for each tensor if the shape is defined. + + Args: + tensors: TensorFlow tensor.Tensor. + shapes: Dict of strings representing input tensor names to list of + integers representing input shapes (e.g., {"foo": : [1, 16, 16, 3]}). + + Raises: + ValueError: + `shapes` contains an invalid tensor. + `shapes` contains an invalid shape for a valid tensor. 
+ """ + if shapes: + tensor_names_to_tensor = { + get_tensor_name(tensor): tensor for tensor in tensors + } + for name, shape in shapes.items(): + if name not in tensor_names_to_tensor: + raise ValueError("Invalid tensor \'{}\' found in tensor shapes " + "map.".format(name)) + if shape is not None: + tensor = tensor_names_to_tensor[name] + try: + tensor.set_shape(shape) + except ValueError as error: + message = ("The shape of tensor '{0}' cannot be changed from {1} to " + "{2}. {3}".format(name, tensor.shape, shape, str(error))) + raise ValueError(message) + + +def get_grappler_config(optimizers_list): + """Creates a tf.compat.v1.ConfigProto for configuring Grappler. + + Args: + optimizers_list: List of strings that represents the list of optimizers. + + Returns: + tf.ConfigProto. + """ + config = _config_pb2.ConfigProto() + rewrite_options = config.graph_options.rewrite_options + for optimizer in optimizers_list: + rewrite_options.optimizers.append(optimizer) + return config + + +def run_graph_optimizations(graph_def, + input_arrays, + output_arrays, + config, + graph=None): + """Apply standard TensorFlow optimizations to the graph_def. + + Args: + graph_def: Frozen GraphDef to be optimized. + input_arrays: List of arrays that are considered inputs of the graph. + output_arrays: List of arrays that are considered outputs of the graph. + config: tf.ConfigProto. + graph: TensorFlow Graph. Required when Eager mode is enabled. (default None) + + Returns: + A new, optimized GraphDef. 
+ """ + meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph) + + signature = _meta_graph_pb2.SignatureDef() + for array in input_arrays: + signature.inputs[array.name].name = array.name + signature.inputs[array.name].dtype = array.dtype.as_datatype_enum + signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto()) + + for array in output_arrays: + signature.outputs[array.name].name = array.name + signature.outputs[array.name].dtype = array.dtype.as_datatype_enum + signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto()) + + meta_graph.signature_def["not_used_key"].CopyFrom(signature) + + # We need to add a collection called 'train_op' so that grappler + # knows what the outputs are. + fetch_collection = _meta_graph_pb2.CollectionDef() + for array in input_arrays + output_arrays: + fetch_collection.node_list.value.append(array.name) + meta_graph.collection_def["train_op"].CopyFrom(fetch_collection) + + return tf_optimizer.OptimizeGraph(config, meta_graph) + + +def _convert_op_hints_if_present(sess, graph_def, output_tensors, + hinted_outputs_nodes): + if is_frozen_graph(sess): + raise ValueError("Try to convert op hints, needs unfrozen graph.") + output_arrays = [get_tensor_name(tensor) for tensor in output_tensors] + graph_def = _convert_to_constants.convert_variables_to_constants( + sess, graph_def, output_arrays + hinted_outputs_nodes) + graph_def = convert_op_hints_to_stubs(graph_def=graph_def) + return graph_def + + +def freeze_graph(sess, input_tensors, output_tensors): + """Returns a frozen GraphDef. + + Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the + existing GraphDef is returned. The Grappler pass is only run on models that + are frozen in order to inline the functions in the graph. + If OpHints is present, it will try to convert the OpHint graph. + + Args: + sess: TensorFlow Session. + input_tensors: List of input tensors. 
+ output_tensors: List of output tensors (only .name is used from this). + + Returns: + Frozen GraphDef. + """ + # Runs a Grappler pass in order to inline any functions in the graph. + # Asides from inlining any simple function, Grappler will also try to lower + # while loop into switch merge representation which is undesired for Ophints, + # so we simply remove those attributes to prevent Grappler from doing so. + graph_def = _convert_to_constants.disable_lower_using_switch_merge( + sess.graph_def) + config = get_grappler_config(["function"]) + graph_def = run_graph_optimizations( + graph_def, input_tensors, output_tensors, config, graph=sess.graph) + + # If ophints are present, just convert them. + hinted_outputs_nodes = find_all_hinted_output_nodes(sess) + if hinted_outputs_nodes: + return _convert_op_hints_if_present(sess, graph_def, output_tensors, + hinted_outputs_nodes) + + if not is_frozen_graph(sess): + output_node_names = [tensor.name.split(":")[0] for tensor in output_tensors] + return _convert_to_constants.convert_variables_to_constants( + sess, graph_def, output_node_names + ) + else: + return sess.graph_def + + +def is_frozen_graph(sess): + """Determines if the graph is frozen. + + Determines if a graph has previously been frozen by checking for any + operations of type Variable*. If variables are found, the graph is not frozen. + + Args: + sess: TensorFlow Session. + + Returns: + Bool. + """ + for op in sess.graph.get_operations(): + if op.type.startswith("Variable") or op.type.endswith("VariableOp"): + return False + return True + + +def build_debug_info_func(original_graph): + """Returns a method to retrieve the `GraphDebugInfo` from the original graph. + + Args: + original_graph: The original `Graph` containing all the op stack traces. + + Returns: + A function which retrieves the stack traces from the original graph and + converts them to a `GraphDebugInfo` for a given set of nodes. 
+ """ + + def f(original_nodes): + """Function to create `GraphDebugInfo` for the given `original_nodes`.""" + if not original_graph: + return None + # For the given nodes, gets all the op definitions in the original graph. + useful_ops = [] + for func, name in original_nodes: + try: + if not func: + useful_ops.append((func, original_graph.get_operation_by_name(name))) + else: + sub_func = original_graph._get_function(func) # pylint: disable=protected-access + if isinstance(sub_func, function.AtomicFunction): # pylint: disable=protected-access + useful_ops.append( + (func, sub_func.graph.get_operation_by_name(name))) + else: + sys.stderr.write( + "Use '@tf.function' or '@defun' to decorate the function.\n") + continue + except KeyError: + # New node created by graph optimizer. No stack trace from source code. + continue + # Convert all the op definitions to stack traces in terms of GraphDebugInfo. + return _error_interpolation.create_graph_debug_info_def(useful_ops) + + return f + + +def convert_debug_info_func(saved_debug_info): + """Returns a method to retrieve the `GraphDebugInfo` from the original graph. + + Args: + saved_debug_info: The `GraphDebugInfo` containing all the debug info. + + Returns: + A function which retrieves the stack traces from the original graph and + converts them to a `GraphDebugInfo` for a given set of nodes. + """ + + def f(original_nodes): + """Function to create `GraphDebugInfo` for the given `original_nodes`.""" + del original_nodes + return saved_debug_info + + return f + + +def get_debug_info(nodes_to_debug_info_func, converted_graph): + """Returns the debug info for the original nodes in the `converted_graph`. + + Args: + nodes_to_debug_info_func: The method to collect the op debug info for the + nodes. + converted_graph: A `GraphDef` after optimization and transformation. + + Returns: + `GraphDebugInfo` for all the original nodes in `converted_graph`. 
def convert_bytes_to_c_source(data,
                              array_name,
                              max_line_width=80,
                              include_guard=None,
                              include_path=None,
                              use_tensorflow_license=False):
  """Returns strings representing a C constant array containing `data`.

  Args:
    data: Byte array that will be converted into a C constant.
    array_name: String to use as the variable name for the constant array.
    max_line_width: The longest line length, for formatting purposes.
    include_guard: Name to use for the include guard macro definition.
    include_path: Optional path to include in the source file.
    use_tensorflow_license: Whether to include the standard TensorFlow Apache2
      license in the generated files.

  Returns:
    Text that can be compiled as a C source file to link in the data as a
    literal array of values.
    Text that can be used as a C header file to reference the literal array.
  """

  pad = " "
  lines = []
  current = pad
  for byte in bytearray(data):
    # Wrap before the line would exceed max_line_width.
    if (len(current) + 4) > max_line_width:
      lines.append(current + "\n")
      current = pad
    current += " 0x%02x," % (byte,)
  if len(current) > len(pad):
    lines.append(current + "\n")
  array_values = "".join(lines)

  if include_guard is None:
    include_guard = "TENSORFLOW_LITE_UTIL_" + array_name.upper() + "_DATA_H_"

  include_line = ""
  if include_path is not None:
    include_line = "#include \"{include_path}\"\n".format(
        include_path=include_path)

  license_text = ""
  if use_tensorflow_license:
    license_text = """
/* Copyright {year} The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
""".format(year=datetime.date.today().year)

  source_template = """{license_text}
// This is a TensorFlow Lite model file that has been converted into a C data
// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
// This form is useful for compiling into a binary for devices that don't have a
// file system.

{include_line}
// We need to keep the data array aligned on some architectures.
#ifdef __has_attribute
#define HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define HAVE_ATTRIBUTE(x) 0
#endif
#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))
#else
#define DATA_ALIGN_ATTRIBUTE
#endif

const unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{
{array_values}}};
const int {array_name}_len = {array_length};
"""

  source_text = source_template.format(
      array_name=array_name,
      array_length=len(data),
      array_values=array_values,
      license_text=license_text,
      include_line=include_line)

  header_template = """
{license_text}

// This is a TensorFlow Lite model file that has been converted into a C data
// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.
// This form is useful for compiling into a binary for devices that don't have a
// file system.

#ifndef {include_guard}
#define {include_guard}

extern const unsigned char {array_name}[];
extern const int {array_name}_len;

#endif  // {include_guard}
"""

  header_text = header_template.format(
      array_name=array_name,
      include_guard=include_guard,
      license_text=license_text)

  return source_text, header_text


def _convert_model_from_bytearray_to_object(model_bytearray):
  """Converts a tflite model from a bytearray into a parsable object."""
  root = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
  parsed = schema_fb.ModelT.InitFromObj(root)
  # Deep copy so the result is independent of the input buffer.
  return copy.deepcopy(parsed)


def _convert_model_from_object_to_bytearray(model_object):
  """Converts a tflite model from a parsable object into a bytearray."""
  # Initial size of the buffer, which will grow automatically if needed.
  builder = flatbuffers.Builder(1024)
  builder.Finish(model_object.Pack(builder),
                 file_identifier=_TFLITE_FILE_IDENTIFIER)
  return bytes(builder.Output())
get_quantize_opcode_idx(model): + """Returns the quantize op idx.""" + quant_opcode_idxs = [] + for idx, opcode in enumerate(model.operatorCodes): + builtin_code = schema_util.get_builtin_code_from_operator_code(opcode) + if builtin_code == schema_fb.BuiltinOperator.QUANTIZE: + quant_opcode_idxs.append(idx) + return quant_opcode_idxs + + +def get_dequantize_opcode_idx(model): + """Returns the quantize op idx.""" + quant_opcode_idxs = [] + for idx, opcode in enumerate(model.operatorCodes): + builtin_code = schema_util.get_builtin_code_from_operator_code(opcode) + if builtin_code == schema_fb.BuiltinOperator.DEQUANTIZE: + quant_opcode_idxs.append(idx) + return quant_opcode_idxs + + +def _update_signature_def_tensors(tensor_maps, map_old_to_new_tensors): + """Update the tensors in the SignatureDef's TensorMaps.""" + for i in range(len(tensor_maps)): + if tensor_maps[i].tensorIndex in map_old_to_new_tensors: + tensor_maps[i].tensorIndex = ( + map_old_to_new_tensors[tensor_maps[i].tensorIndex]) + + +def _remove_tensors_from_model(model, remove_tensors_idxs): + """Remove tensors from model.""" + if not remove_tensors_idxs: + return + if len(model.subgraphs) > 1: + logging.info("Skipping the removal of dangled tensors since the model has " + "multiple subgraphs and tensors can be used in the different " + "subgraph(s)") + return + subgraph = model.subgraphs[0] + tensors = subgraph.tensors + operators = subgraph.operators + + logging.debug("Removing tensors at indices : %s", remove_tensors_idxs) + # An optimized check to validate if "remove_tensors_idxs" (eg: [4,5,6]) is an + # exact subset, with ordering, of "tensors" indices (eg: [0,1,2,3,4,5,6]). 
def _modify_model_input_type(model, inference_input_type=dtypes.float32):
  """Modify model input type.

  Dispatches to `_modify_model_input_type_per_subgraph`: once for the main
  subgraph when the model has no SignatureDefs, otherwise once per signature.
  No-op when the requested input type is float32.

  Args:
    model: A tflite model object (modified in place).
    inference_input_type: tf.DType representing the desired model input type.
  """
  if inference_input_type == dtypes.float32:
    return

  if not model.signatureDefs:
    # signature_index = -1 signals "no SignatureDef to update".
    _modify_model_input_type_per_subgraph(model, 0, -1, inference_input_type)
    return

  for signature_index, signature_def in enumerate(model.signatureDefs):
    _modify_model_input_type_per_subgraph(model, signature_def.subgraphIndex,
                                          signature_index, inference_input_type)


def _modify_model_input_type_per_subgraph(model, subgraph_index,
                                          signature_index,
                                          inference_input_type):
  """Modify model input type per subgraph.

  Rewrites the float input + QUANTIZE op pattern so that the subgraph's
  inputs have `inference_input_type` directly.

  Args:
    model: A tflite model object (modified in place).
    subgraph_index: Index of the subgraph to rewrite.
    signature_index: Index of the SignatureDef to keep in sync, or -1 if none.
    inference_input_type: tf.DType representing the desired model input type.

  Raises:
    ValueError: If the model inputs are not quantized in a way compatible
      with `inference_input_type`, or the requested type is unsupported.
  """
  subgraph = model.subgraphs[subgraph_index]
  tensors = subgraph.tensors
  operators = subgraph.operators

  # Find all quantize operators
  quant_opcode_idxs = get_quantize_opcode_idx(model)
  if operators and not quant_opcode_idxs:
    for input_idx in subgraph.inputs:
      input_type = _convert_tflite_enum_type_to_tf_type(tensors[input_idx].type)
      if input_type == dtypes.float32:
        raise ValueError("Model input is not dequantized.")
    # None of the inputs have float32, then they must be int16, int8, or bool
    return

  # Validate that the model input is quantized
  input_quant_ops = []
  for op in operators:
    # Find operators that quantize model input
    if op.opcodeIndex in quant_opcode_idxs and op.inputs[0] in subgraph.inputs:
      float_tensor, quant_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
      # If found, validate that the operator's input type is float
      float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
      if float_type != dtypes.float32:
        if float_type == inference_input_type:
          # Input already has the requested type; nothing to rewrite.
          continue
        else:
          raise ValueError(
              "Initial model input type must be tf.float32. Expected type for "
              "tensor with name '{}' is tf.float32, instead type is {}".format(
                  float_tensor.name, get_tf_type_name(float_type)))
      # If found, validate that the operator output is quantized and compatible
      # with the final model input type
      quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
      if quant_type not in _MAP_QUANT_TO_IO_TYPES:
        raise ValueError(
            "Initial model input is not quantized. Expected type for "
            "tensor with name '{}' should be in {}, instead type is {}".format(
                quant_tensor.name,
                tuple(get_tf_type_name(t) for t in
                      _MAP_QUANT_TO_IO_TYPES.keys()),
                get_tf_type_name(quant_type)))
      else:
        inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]
        if inference_input_type not in inference_io_types:
          raise ValueError(
              "Unsupported `inference_input_type` value. Expected to be in "
              "{}, instead got {}.".format(
                  tuple(get_tf_type_name(t) for t in inference_io_types),
                  get_tf_type_name(inference_input_type)))
      input_quant_ops.append(op)

  if len(subgraph.inputs) != len(input_quant_ops):
    logging.warning(
        "For model inputs containing unsupported operations which cannot be "
        "quantized, the `inference_input_type` attribute will default to the "
        "original type."
    )

  # Modify model input type
  if inference_input_type == dtypes.uint8:
    # Change quant op (float to int8) to quant op (uint8 to int8)
    for op in input_quant_ops:
      int8_quantization = tensors[op.outputs[0]].quantization
      uint8_quantization = schema_fb.QuantizationParametersT()
      uint8_quantization.scale = [int8_quantization.scale[0]]
      # uint8 zero point = int8 zero point + 128 (shift of the representable
      # range).
      uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
      tensors[op.inputs[0]].quantization = uint8_quantization
      tensors[op.inputs[0]].type = schema_fb.TensorType.UINT8
  elif inference_input_type in _MAP_QUANT_TO_IO_TYPES:
    # Remove the inputs and the quant operator
    remove_tensors_idxs = set()
    for op in input_quant_ops:
      # NOTE(review): numpy-style boolean indexing — subgraph.inputs is
      # presumably a numpy array here; confirm.
      subgraph.inputs[subgraph.inputs == op.inputs[0]] = op.outputs[0]
      if signature_index >= 0:
        signature_def = model.signatureDefs[signature_index]
        for i in range(len(signature_def.inputs)):
          if signature_def.inputs[i].tensorIndex == op.inputs[0]:
            signature_def.inputs[i].tensorIndex = op.outputs[0]
      remove_tensors_idxs.add(op.inputs[0])
      operators.remove(op)
    # Remove tensors marked for deletion.
    _remove_tensors_from_model(model, remove_tensors_idxs)
  else:
    raise ValueError(
        "Unsupported `inference_input_type` value {}.".format(
            get_tf_type_name(inference_input_type)))
def _modify_model_output_type(model, inference_output_type=dtypes.float32):
  """Modify model output type.

  Dispatches to `_modify_model_output_type_per_subgraph`: once for the main
  subgraph when the model has no SignatureDefs, otherwise once per signature.
  No-op when the requested output type is float32.

  Args:
    model: A tflite model object (modified in place).
    inference_output_type: tf.DType representing the desired model output type.
  """
  if inference_output_type == dtypes.float32:
    return

  if not model.signatureDefs:
    # signature_index = -1 signals "no SignatureDef to update".
    _modify_model_output_type_per_subgraph(model, 0, -1, inference_output_type)
    return

  for signature_index, signature_def in enumerate(model.signatureDefs):
    _modify_model_output_type_per_subgraph(model, signature_def.subgraphIndex,
                                           signature_index,
                                           inference_output_type)


def _modify_model_output_type_per_subgraph(model, subgraph_index,
                                           signature_index,
                                           inference_output_type):
  """Modify model output type per subgraph.

  Rewrites the DEQUANTIZE op + float output pattern so that the subgraph's
  outputs have `inference_output_type` directly.

  Args:
    model: A tflite model object (modified in place).
    subgraph_index: Index of the subgraph to rewrite.
    signature_index: Index of the SignatureDef to keep in sync, or -1 if none.
    inference_output_type: tf.DType representing the desired model output type.

  Raises:
    ValueError: If the model outputs are not dequantized in a way compatible
      with `inference_output_type`, or the requested type is unsupported.
  """
  subgraph = model.subgraphs[subgraph_index]
  tensors = subgraph.tensors
  operators = subgraph.operators

  # Find all dequantize operators
  dequant_opcode_idxs = get_dequantize_opcode_idx(model)
  if operators and not dequant_opcode_idxs:
    for output in subgraph.outputs:
      output_type = _convert_tflite_enum_type_to_tf_type(tensors[output].type)
      if output_type == dtypes.float32:
        raise ValueError("Model output is not dequantized.")
    # None of the outputs have float32, then they must be int16, int8, or bool
    return

  # Validate that the model output is dequantized
  output_dequant_ops = []
  for op in operators:
    # Find operators that dequantize model output
    if (op.opcodeIndex in dequant_opcode_idxs and
        op.outputs[0] in subgraph.outputs):
      # If found, validate that the operator's output type is float
      quant_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]
      float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)
      if float_type != dtypes.float32:
        if float_type == inference_output_type:
          # Output already has the requested type; nothing to rewrite.
          continue
        else:
          raise ValueError(
              "Initial model output type must be tf.float32. Expected type for "
              "tensor with name '{}' is tf.float32, instead type is {}".format(
                  float_tensor.name, get_tf_type_name(float_type)))
      # If found, validate that the operator input is quantized and compatible
      # with the final model output type
      quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)
      if quant_type not in _MAP_QUANT_TO_IO_TYPES:
        raise ValueError(
            "Initial model output is not dequantized. Expected type for "
            "tensor with name '{}' should be in {}, instead type is {}".format(
                quant_tensor.name,
                tuple(get_tf_type_name(t) for t in
                      _MAP_QUANT_TO_IO_TYPES.keys()),
                get_tf_type_name(quant_type)))
      else:
        inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]
        if inference_output_type not in inference_io_types:
          raise ValueError(
              "Unsupported `inference_output_type` value. Expected to be in "
              "{}, instead got {}.".format(
                  tuple(get_tf_type_name(t) for t in inference_io_types),
                  get_tf_type_name(inference_output_type)))
      output_dequant_ops.append(op)

  if len(subgraph.outputs) != len(output_dequant_ops):
    logging.warning(
        "For model outputs containing unsupported operations which cannot be "
        "quantized, the `inference_output_type` attribute will default to the "
        "original type."
    )

  # Modify model output type
  if inference_output_type == dtypes.uint8:
    # Find a quantize operator
    quant_opcode_idx = -1
    for idx, opcode in enumerate(model.operatorCodes):
      builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)
      if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:
        quant_opcode_idx = idx
        break
    # Create a quantize operator, if none exist
    if quant_opcode_idx == -1:
      quant_op = schema_fb.OperatorCodeT()
      quant_op.builtinCode = schema_fb.BuiltinOperator.QUANTIZE
      quant_op.deprecatedBuiltinCode = schema_fb.BuiltinOperator.QUANTIZE
      model.operatorCodes.append(quant_op)
      quant_opcode_idx = len(model.operatorCodes) - 1
    # Change dequant op (int8 to float) to quant op (int8 to uint8)
    for op in output_dequant_ops:
      op.opcodeIndex = quant_opcode_idx
      int8_quantization = tensors[op.inputs[0]].quantization
      uint8_quantization = schema_fb.QuantizationParametersT()
      uint8_quantization.scale = [int8_quantization.scale[0]]
      # uint8 zero point = int8 zero point + 128 (shift of the representable
      # range).
      uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]
      tensors[op.outputs[0]].quantization = uint8_quantization
      tensors[op.outputs[0]].type = schema_fb.TensorType.UINT8
  elif inference_output_type in _MAP_QUANT_TO_IO_TYPES:
    # Remove the outputs and the dequant operator
    remove_tensors_idxs = set()
    for op in output_dequant_ops:
      # NOTE(review): numpy-style boolean indexing — subgraph.outputs is
      # presumably a numpy array here; confirm.
      subgraph.outputs[subgraph.outputs == op.outputs[0]] = op.inputs[0]
      if signature_index >= 0:
        signature_def = model.signatureDefs[signature_index]
        for i in range(len(signature_def.outputs)):
          if signature_def.outputs[i].tensorIndex == op.outputs[0]:
            signature_def.outputs[i].tensorIndex = op.inputs[0]
      remove_tensors_idxs.add(op.outputs[0])
      operators.remove(op)
    # Remove tensors marked for deletion.
    _remove_tensors_from_model(model, remove_tensors_idxs)
  else:
    raise ValueError(
        "Unsupported `inference_output_type` value {}.".format(
            get_tf_type_name(inference_output_type)))
+ _remove_tensors_from_model(model, remove_tensors_idxs) + else: + raise ValueError( + "Unsupported `inference_output_type` value {}.".format( + get_tf_type_name(inference_output_type))) + + +def _remove_redundant_quantize_ops(model): + """Finds back to back quantize ops and remove the first quantize op.""" + if not model.signatureDefs: + _remove_redundant_quantize_ops_per_subgraph(model, 0, -1) + return + + for signature_index, signature_def in enumerate(model.signatureDefs): + _remove_redundant_quantize_ops_per_subgraph(model, + signature_def.subgraphIndex, + signature_index) + + +def _remove_redundant_quantize_ops_per_subgraph(model, subgraph_index, + signature_index): + """Remove redundant quantize ops per subgraph.""" + subgraph = model.subgraphs[subgraph_index] + tensors = subgraph.tensors + operators = subgraph.operators + + # Find all quantize operators. + quant_opcode_idxs = get_quantize_opcode_idx(model) + dequant_opcode_idxs = get_dequantize_opcode_idx(model) + + # Find all redundant quant tensors. + all_quant_ops = [] + redundant_quant_tensors = {} + output_dequant_tensors = {} + for op in operators: + if op.opcodeIndex in quant_opcode_idxs: + all_quant_ops.append(op) + input_tensor = tensors[op.inputs[0]] + output_tensor = tensors[op.outputs[0]] + input_type = _convert_tflite_enum_type_to_tf_type(input_tensor.type) + output_type = _convert_tflite_enum_type_to_tf_type(output_tensor.type) + # This is a requantize op, so write down its input tensor index. + if input_type != dtypes.float32 and output_type != dtypes.float32: + redundant_quant_tensors[op.inputs[0]] = op + if (op.opcodeIndex in dequant_opcode_idxs and + op.outputs[0] in subgraph.outputs): + output_dequant_tensors[op.inputs[0]] = op + + # Remove all the quant ops which produce the redundant quant tensors. 
+ for op in all_quant_ops: + output_tensor_idx = op.outputs[0] + if output_tensor_idx in redundant_quant_tensors: + requantize_op = redundant_quant_tensors[output_tensor_idx] + if model.signatureDefs: + signature_def = model.signatureDefs[0] + for output in signature_def.outputs: + if output.tensorIndex == op.outputs[0]: + output.tensorIndex = op.inputs[0] + deleted_tensor = requantize_op.inputs[0] + # Reset the input of the requantize op to the float input + requantize_op.inputs[0] = op.inputs[0] + # Migrate other operator users to output tensor of requantize op + for op_user in operators: + if deleted_tensor in op_user.inputs and op_user != requantize_op: + for idx, input_tensor in enumerate(op_user.inputs): + if input_tensor == deleted_tensor: + op_user.inputs[idx] = requantize_op.outputs[0] + operators.remove(op) + + # Remove all the quant ops which connect to the output dequant op. + for op in all_quant_ops: + output_tensor_idx = op.outputs[0] + if output_tensor_idx in output_dequant_tensors: + dequant_op = output_dequant_tensors[output_tensor_idx] + subgraph.outputs[subgraph.outputs == dequant_op.outputs[0]] = op.inputs[0] + if signature_index >= 0: + signature_def = model.signatureDefs[signature_index] + for output in signature_def.outputs: + if output.tensorIndex == dequant_op.outputs[0]: + output.tensorIndex = op.inputs[0] + operators.remove(op) + operators.remove(dequant_op) + + +def modify_model_io_type( + model, inference_input_type=dtypes.float32, + inference_output_type=dtypes.float32): + """Modify the input/output type of a tflite model. + + Args: + model: A tflite model. + inference_input_type: tf.DType representing modified input type. + (default tf.float32. If model input is int8 quantized, it must be in + {tf.float32, tf.int8,tf.uint8}, else if model input is int16 quantized, + it must be in {tf.float32, tf.int16}, else it must be tf.float32) + inference_output_type: tf.DType representing modified output type. + (default tf.float32. 
If model output is int8 dequantized, it must be in + {tf.float32, tf.int8,tf.uint8}, else if model output is int16 dequantized, + it must be in {tf.float32, tf.int16}, else it must be tf.float32) + Returns: + A tflite model with modified input/output type. + + Raises: + ValueError: If `inference_input_type`/`inference_output_type` is unsupported + or a supported integer type is specified for a model whose input/output is + not quantized/dequantized. + RuntimeError: If the modification was unsuccessful. + + """ + if (inference_input_type == dtypes.float32 and + inference_output_type == dtypes.float32): + return model + + model_object = _convert_model_from_bytearray_to_object(model) + + _modify_model_input_type(model_object, inference_input_type) + + _modify_model_output_type(model_object, inference_output_type) + + _remove_redundant_quantize_ops(model_object) + + return _convert_model_from_object_to_bytearray(model_object) + + +def get_sparsity_modes(model_object): + """Get sparsity modes used in a tflite model. + + The sparsity modes are listed in conversion_metadata.fbs file. + + Args: + model_object: A tflite model in object form. + + Returns: + The list of sparsity modes used in the model. + """ + if not model_object or not model_object.metadata: + return [] + + result = set() + for subgraph in model_object.subgraphs: + for tensor in subgraph.tensors: + if not tensor.sparsity: + continue + + # Block map is the list if indexes where the block size is larger than 1. + # So empty block map means it is random sparsity. + if not tensor.sparsity.blockMap: + result.add( + conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY) + else: + result.add( + conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY) + + return list(result) + + +def get_model_hash(model): + """Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure. + + Args: + model: A TensorFlow Lite model object. 
+ + Returns: + int: A 64-bit integer hash value representing the model structure. + """ + # TODO(b/344872922): Move the hashing implementation to C++ layer since not + # all calls to the converter come via the Python API. + hash_value = 0 + + for subgraph in model.subgraphs: + if subgraph.operators is not None: + hash_value = update_hash_with_primitive_value( + hash_value, len(subgraph.operators) + ) + + for operator in subgraph.operators: + if operator.inputs is not None: + hash_value = update_hash_with_array(hash_value, operator.inputs) + + if operator.outputs is not None: + hash_value = update_hash_with_array(hash_value, operator.outputs) + + if subgraph.tensors is not None: + hash_value = update_hash_with_primitive_value( + hash_value, len(subgraph.tensors) + ) + + for tensor in subgraph.tensors: + if tensor.buffer is not None: + buffer = model.buffers[tensor.buffer] + if buffer.data is not None: + hash_value = update_hash_with_primitive_value( + hash_value, len(buffer.data) + ) + + if tensor.shape is not None: + hash_value = update_hash_with_array(hash_value, tensor.shape) + + if subgraph.inputs is not None: + hash_value = update_hash_with_primitive_value( + hash_value, len(subgraph.inputs) + ) + + if subgraph.outputs is not None: + hash_value = update_hash_with_primitive_value( + hash_value, len(subgraph.outputs) + ) + + return hash_value + + +def update_hash_with_primitive_value(hash_value, value): + """Update the hash value using a primitive value. + + Args: + hash_value (uint64): The current hash value. + value: The primitive value to incorporate into the hash. + + Returns: + int: The updated hash value. + """ + hash_const = np.uint64(0x9E3779B97F4A7800) + hash_value = np.uint64(hash_value) + value = np.uint64(value) + + # Convert to arrays before shifting. + hash_value = np.array([hash_value]) + value = np.array([value]) + + # Shift the values, then take the value from the first index. 
+ hash_value = np.bitwise_xor( + hash_value, + ( + value + + hash_const + + np.left_shift(hash_value, 10) + + np.right_shift(hash_value, 4) + ), + )[0] + + return hash_value + + +def update_hash_with_array(hash_value, int_array): + """Update the hash value using a TFLite int array. + + Args: + hash_value (int): The current hash value. + int_array: A TFLite int array to incorporate into the hash. + + Returns: + int: The updated hash value. + """ + if int_array is not None: + for i in int_array: + hash_value = update_hash_with_primitive_value(hash_value, i) + return hash_value + + +def populate_conversion_metadata(model_object, metadata): + """Add or update conversion metadata to a tflite model. + + Args: + model_object: A tflite model in object form. + metadata: The conversion metadata. + + Returns: + A tflite model object with embedded conversion metadata. + """ + try: + metadata_builder = flatbuffers.Builder(0) + metadata_builder.Finish(metadata.Pack(metadata_builder)) + buffer_field = schema_fb.BufferT() + buffer_field.data = metadata_builder.Output() + + if not model_object.metadata: + model_object.metadata = [] + else: + # Check if metadata has already been populated. + for meta in model_object.metadata: + if meta.name.decode("utf-8") == CONVERSION_METADATA_FIELD_NAME: + model_object.buffers[meta.buffer] = buffer_field + return model_object + + if not model_object.buffers: + model_object.buffers = [] + model_object.buffers.append(buffer_field) + # Creates a new metadata field. + metadata_field = schema_fb.MetadataT() + metadata_field.name = CONVERSION_METADATA_FIELD_NAME + metadata_field.buffer = len(model_object.buffers) - 1 + model_object.metadata.append(metadata_field) + + return model_object + except Exception: # pylint: disable=broad-except + return model_object + + +def get_conversion_metadata(model_buffer): + """Read conversion metadata from a tflite model. + + Args: + model_buffer: A tflite model. 
+ + Returns: + The conversion metadata or None if it is not populated. + """ + model_object = flatbuffer_utils.convert_bytearray_to_object(model_buffer) + if not model_object or not model_object.metadata: + return None + + for meta in model_object.metadata: + if meta.name.decode("utf-8") == CONVERSION_METADATA_FIELD_NAME: + metadata_buf = model_object.buffers[meta.buffer].data.tobytes() + return conversion_metadata_fb.ConversionMetadataT.InitFromObj( + conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata( + metadata_buf, 0 + ) + ) + + return None diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683f451db1a10d6b3301f7e2cbac7af3c2bb83cd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca3d420a8499ec614678b732fdfde8c39016f708 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/flatbuffer_utils.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a33c0c8844d2111b2d4e63564c2770a39fd6d622 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/__pycache__/visualize.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ad90f04b01d892c7ae825a3620dbd0dd185ba884 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/flatbuffer_utils.py @@ -0,0 +1,455 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility functions for FlatBuffers. + +All functions that are commonly used to work with FlatBuffers. 
+ +Refer to the tensorflow lite flatbuffer schema here: +tensorflow/lite/schema/schema.fbs +""" + +import copy +import random +import re +import struct +import sys + +import flatbuffers + +from tensorflow.lite.python import schema_py_generated as schema_fb +from tensorflow.lite.python import schema_util +from tensorflow.python.platform import gfile + +_TFLITE_FILE_IDENTIFIER = b'TFL3' + + +def convert_bytearray_to_object(model_bytearray): + """Converts a tflite model from a bytearray to an object for parsing.""" + model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0) + return schema_fb.ModelT.InitFromObj(model_object) + + +def read_model(input_tflite_file): + """Reads a tflite model as a python object. + + Args: + input_tflite_file: Full path name to the input tflite file + + Raises: + RuntimeError: If input_tflite_file path is invalid. + IOError: If input_tflite_file cannot be opened. + + Returns: + A python object corresponding to the input tflite file. + """ + if not gfile.Exists(input_tflite_file): + raise RuntimeError('Input file not found at %r\n' % input_tflite_file) + with gfile.GFile(input_tflite_file, 'rb') as input_file_handle: + model_bytearray = bytearray(input_file_handle.read()) + return read_model_from_bytearray(model_bytearray) + + +def read_model_from_bytearray(model_bytearray): + """Reads a tflite model as a python object. + + Args: + model_bytearray: TFLite model in bytearray format. + + Returns: + A python object corresponding to the input tflite file. 
+ """ + model = convert_bytearray_to_object(model_bytearray) + if sys.byteorder == 'big': + byte_swap_tflite_model_obj(model, 'little', 'big') + + # Offset handling for models > 2GB + for buffer in model.buffers: + if buffer.offset: + buffer.data = model_bytearray[buffer.offset : buffer.offset + buffer.size] + buffer.offset = 0 + buffer.size = 0 + for subgraph in model.subgraphs: + for op in subgraph.operators: + if op.largeCustomOptionsOffset: + op.customOptions = model_bytearray[ + op.largeCustomOptionsOffset : op.largeCustomOptionsOffset + + op.largeCustomOptionsSize + ] + op.largeCustomOptionsOffset = 0 + op.largeCustomOptionsSize = 0 + + return model + + +def read_model_with_mutable_tensors(input_tflite_file): + """Reads a tflite model as a python object with mutable tensors. + + Similar to read_model() with the addition that the returned object has + mutable tensors (read_model() returns an object with immutable tensors). + + NOTE: This API only works for TFLite generated with + _experimental_use_buffer_offset=false + + Args: + input_tflite_file: Full path name to the input tflite file + + Raises: + RuntimeError: If input_tflite_file path is invalid. + IOError: If input_tflite_file cannot be opened. + + Returns: + A mutable python object corresponding to the input tflite file. + """ + return copy.deepcopy(read_model(input_tflite_file)) + + +def convert_object_to_bytearray(model_object, extra_buffer=b''): + """Converts a tflite model from an object to a immutable bytearray.""" + # Initial size of the buffer, which will grow automatically if needed + builder = flatbuffers.Builder(1024) + model_offset = model_object.Pack(builder) + builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER) + model_bytearray = bytes(builder.Output()) + model_bytearray = model_bytearray + extra_buffer + return model_bytearray + + +def write_model(model_object, output_tflite_file): + """Writes the tflite model, a python object, into the output file. 
+ + NOTE: This API only works for TFLite generated with + _experimental_use_buffer_offset=false + + Args: + model_object: A tflite model as a python object + output_tflite_file: Full path name to the output tflite file. + + Raises: + IOError: If output_tflite_file path is invalid or cannot be opened. + """ + if sys.byteorder == 'big': + model_object = copy.deepcopy(model_object) + byte_swap_tflite_model_obj(model_object, 'big', 'little') + model_bytearray = convert_object_to_bytearray(model_object) + with gfile.GFile(output_tflite_file, 'wb') as output_file_handle: + output_file_handle.write(model_bytearray) + + +def strip_strings(model): + """Strips all nonessential strings from the model to reduce model size. + + We remove the following strings: + (find strings by searching ":string" in the tensorflow lite flatbuffer schema) + 1. Model description + 2. SubGraph name + 3. Tensor names + We retain OperatorCode custom_code and Metadata name. + + Args: + model: The model from which to remove nonessential strings. + """ + + model.description = None + for subgraph in model.subgraphs: + subgraph.name = None + for tensor in subgraph.tensors: + tensor.name = None + # We clear all signature_def structure, since without names it is useless. + model.signatureDefs = None + + +def type_to_name(tensor_type): + """Converts a numerical enum to a readable tensor type.""" + for name, value in schema_fb.TensorType.__dict__.items(): + if value == tensor_type: + return name + return None + + +def randomize_weights(model, random_seed=0, buffers_to_skip=None): + """Randomize weights in a model. + + Args: + model: The model in which to randomize weights. + random_seed: The input to the random number generator (default value is 0). + buffers_to_skip: The list of buffer indices to skip. The weights in these + buffers are left unmodified. + """ + + # The input to the random seed generator. The default value is 0. 
+ random.seed(random_seed) + + # Parse model buffers which store the model weights + buffers = model.buffers + buffer_ids = range(1, len(buffers)) # ignore index 0 as it's always None + if buffers_to_skip is not None: + buffer_ids = [idx for idx in buffer_ids if idx not in buffers_to_skip] + + buffer_types = {} + for graph in model.subgraphs: + for op in graph.operators: + if op.inputs is None: + break + for input_idx in op.inputs: + tensor = graph.tensors[input_idx] + buffer_types[tensor.buffer] = type_to_name(tensor.type) + + for i in buffer_ids: + buffer_i_data = buffers[i].data + buffer_i_size = 0 if buffer_i_data is None else buffer_i_data.size + if buffer_i_size == 0: + continue + + # Raw data buffers are of type ubyte (or uint8) whose values lie in the + # range [0, 255]. Those ubytes (or unint8s) are the underlying + # representation of each datatype. For example, a bias tensor of type + # int32 appears as a buffer 4 times it's length of type ubyte (or uint8). + # For floats, we need to generate a valid float and then pack it into + # the raw bytes in place. + buffer_type = buffer_types.get(i, 'INT8') + if buffer_type.startswith('FLOAT'): + format_code = 'e' if buffer_type == 'FLOAT16' else 'f' + for offset in range(0, buffer_i_size, struct.calcsize(format_code)): + value = random.uniform(-0.5, 0.5) # See http://b/152324470#comment2 + struct.pack_into(format_code, buffer_i_data, offset, value) + else: + for j in range(buffer_i_size): + buffer_i_data[j] = random.randint(0, 255) + + +def rename_custom_ops(model, map_custom_op_renames): + """Rename custom ops so they use the same naming style as builtin ops. + + Args: + model: The input tflite model. + map_custom_op_renames: A mapping from old to new custom op names. 
+ """ + for op_code in model.operatorCodes: + if op_code.customCode: + op_code_str = op_code.customCode.decode('ascii') + if op_code_str in map_custom_op_renames: + op_code.customCode = map_custom_op_renames[op_code_str].encode('ascii') + + +def opcode_to_name(model, op_code): + """Converts a TFLite op_code to the human readable name. + + Args: + model: The input tflite model. + op_code: The op_code to resolve to a readable name. + + Returns: + A string containing the human readable op name, or None if not resolvable. + """ + op = model.operatorCodes[op_code] + code = max(op.builtinCode, op.deprecatedBuiltinCode) + for name, value in vars(schema_fb.BuiltinOperator).items(): + if value == code: + return name + return None + + +def xxd_output_to_bytes(input_cc_file): + """Converts xxd output C++ source file to bytes (immutable). + + Args: + input_cc_file: Full path name to th C++ source file dumped by xxd + + Raises: + RuntimeError: If input_cc_file path is invalid. + IOError: If input_cc_file cannot be opened. + + Returns: + A bytearray corresponding to the input cc file array. + """ + # Match hex values in the string with comma as separator + pattern = re.compile(r'\W*(0x[0-9a-fA-F,x ]+).*') + + model_bytearray = bytearray() + + with open(input_cc_file) as file_handle: + for line in file_handle: + values_match = pattern.match(line) + + if values_match is None: + continue + + # Match in the parentheses (hex array only) + list_text = values_match.group(1) + + # Extract hex values (text) from the line + # e.g. 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, + values_text = filter(None, list_text.split(',')) + + # Convert to hex + values = [int(x, base=16) for x in values_text] + model_bytearray.extend(values) + + return bytes(model_bytearray) + + +def xxd_output_to_object(input_cc_file): + """Converts xxd output C++ source file to object. 
+ + Args: + input_cc_file: Full path name to th C++ source file dumped by xxd + + Raises: + RuntimeError: If input_cc_file path is invalid. + IOError: If input_cc_file cannot be opened. + + Returns: + A python object corresponding to the input tflite file. + """ + model_bytes = xxd_output_to_bytes(input_cc_file) + return convert_bytearray_to_object(model_bytes) + + +def byte_swap_buffer_content(buffer, chunksize, from_endiness, to_endiness): + """Helper function for byte-swapping the buffers field.""" + to_swap = [ + buffer.data[i : i + chunksize] + for i in range(0, len(buffer.data), chunksize) + ] + buffer.data = b''.join([ + int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) + for byteswap in to_swap + ]) + + +def byte_swap_string_content(buffer, from_endiness, to_endiness): + """Helper function for byte-swapping the string buffer. + + Args: + buffer: TFLite string buffer of from_endiness format. + from_endiness: The original endianness format of the string buffer. + to_endiness: The destined endianness format of the string buffer. + """ + num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness) + string_content = bytearray(buffer.data[4 * (num_of_strings + 2) :]) + prefix_data = b''.join([ + int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes( + 4, to_endiness + ) + for i in range(0, (num_of_strings + 1) * 4 + 1, 4) + ]) + buffer.data = prefix_data + string_content + + +def byte_swap_tflite_model_obj(model, from_endiness, to_endiness): + """Byte swaps the buffers field in a TFLite model. + + Args: + model: TFLite model object of from_endiness format. + from_endiness: The original endianness format of the buffers in model. + to_endiness: The destined endianness format of the buffers in model. 
+ """ + if model is None: + return + # Get all the constant buffers, byte swapping them as per their data types + buffer_swapped = [] + types_of_16_bits = [ + schema_fb.TensorType.FLOAT16, + schema_fb.TensorType.INT16, + schema_fb.TensorType.UINT16, + ] + types_of_32_bits = [ + schema_fb.TensorType.FLOAT32, + schema_fb.TensorType.INT32, + schema_fb.TensorType.COMPLEX64, + schema_fb.TensorType.UINT32, + ] + types_of_64_bits = [ + schema_fb.TensorType.INT64, + schema_fb.TensorType.FLOAT64, + schema_fb.TensorType.COMPLEX128, + schema_fb.TensorType.UINT64, + ] + for subgraph in model.subgraphs: + for tensor in subgraph.tensors: + if ( + tensor.buffer > 0 + and tensor.buffer < len(model.buffers) + and tensor.buffer not in buffer_swapped + and model.buffers[tensor.buffer].data is not None + ): + if tensor.type == schema_fb.TensorType.STRING: + byte_swap_string_content( + model.buffers[tensor.buffer], from_endiness, to_endiness + ) + elif tensor.type in types_of_16_bits: + byte_swap_buffer_content( + model.buffers[tensor.buffer], 2, from_endiness, to_endiness + ) + elif tensor.type in types_of_32_bits: + byte_swap_buffer_content( + model.buffers[tensor.buffer], 4, from_endiness, to_endiness + ) + elif tensor.type in types_of_64_bits: + byte_swap_buffer_content( + model.buffers[tensor.buffer], 8, from_endiness, to_endiness + ) + else: + continue + buffer_swapped.append(tensor.buffer) + + +def byte_swap_tflite_buffer(tflite_model, from_endiness, to_endiness): + """Generates a new model byte array after byte swapping its buffers field. + + Args: + tflite_model: TFLite flatbuffer in a byte array. + from_endiness: The original endianness format of the buffers in + tflite_model. + to_endiness: The destined endianness format of the buffers in tflite_model. + + Returns: + TFLite flatbuffer in a byte array, after being byte swapped to to_endiness + format. + """ + if tflite_model is None: + return None + # Load TFLite Flatbuffer byte array into an object. 
+ model = convert_bytearray_to_object(tflite_model) + + # Byte swapping the constant buffers as per their data types + byte_swap_tflite_model_obj(model, from_endiness, to_endiness) + + # Return a TFLite flatbuffer as a byte array. + return convert_object_to_bytearray(model) + + +def count_resource_variables(model): + """Calculates the number of unique resource variables in a model. + + Args: + model: the input tflite model, either as bytearray or object. + + Returns: + An integer number representing the number of unique resource variables. + """ + if not isinstance(model, schema_fb.ModelT): + model = convert_bytearray_to_object(model) + unique_shared_names = set() + for subgraph in model.subgraphs: + if subgraph.operators is None: + continue + for op in subgraph.operators: + builtin_code = schema_util.get_builtin_code_from_operator_code( + model.operatorCodes[op.opcodeIndex] + ) + if builtin_code == schema_fb.BuiltinOperator.VAR_HANDLE: + unique_shared_names.add(op.builtinOptions.sharedName) + return len(unique_shared_names) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde304a40502d1202f02015c32be5ddb4ad926e5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8f175860085f24009e93000fa4fc8b6f568fcd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e8d0543d2599ce972fe0f827d3a3fd36f8d6a4c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1dab442f74c787a74fed216cee6a81aed3719ca0 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/__pycache__/debugger.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py new file mode 100644 index 0000000000000000000000000000000000000000..c748d6ef62b706be01c704f36786428b4f675bfe --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/optimize/debugging/python/debugger.py @@ -0,0 +1,549 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Python TF-Lite QuantizationDebugger.""" +import collections +import csv +import re +from typing import (Any, Callable, Dict, IO, Iterable, List, Mapping, Optional, + Sequence, Tuple) + +import numpy as np + +from tensorflow.lite.python import convert +from tensorflow.lite.python import interpreter as _interpreter +from tensorflow.lite.python.metrics import metrics as metrics_stub # type: ignore +from tensorflow.python.util import tf_export + + +# TODO(b/198099651): move converter implementation out of lite.py +TFLiteConverter = Any # importing tf.lite creates circular dependency + +# Returns metrics based on difference of values for quantized/float ops. +_DEFAULT_LAYER_DEBUG_METRICS = { + 'num_elements': lambda diffs: diffs.size, + 'stddev': np.std, + 'mean_error': np.average, + 'max_abs_error': lambda diffs: np.max(np.abs(diffs)), + 'mean_squared_error': lambda diffs: np.average(diffs**2), +} + +_NUMERIC_VERIFY_OP_NAME = 'NumericVerify' + + +def _get_quant_params( + tensor_detail: Mapping[str, Any]) -> Optional[Tuple[float, int]]: + """Returns first scale and zero point from tensor detail, if present.""" + quant_params = tensor_detail['quantization_parameters'] + if not quant_params: + return None + if quant_params['scales'] and quant_params['zero_points']: + return (quant_params['scales'][0], quant_params['zero_points'][0]) + return None + + +@tf_export.tf_export('lite.experimental.QuantizationDebugOptions') +class QuantizationDebugOptions: + """Debug options to set up a given QuantizationDebugger.""" + + def __init__(self, + layer_debug_metrics: Optional[Mapping[str, + Callable[[np.ndarray], + float]]] = None, + model_debug_metrics: Optional[Mapping[ + str, Callable[[Sequence[np.ndarray], Sequence[np.ndarray]], + float]]] = None, + layer_direct_compare_metrics: Optional[Mapping[str, Callable[ + [Sequence[np.ndarray], Sequence[np.ndarray], float, int], + float]]] = None, + 
denylisted_ops: Optional[List[str]] = None, + denylisted_nodes: Optional[List[str]] = None, + fully_quantize: bool = False) -> None: + """Initializes debugger options. + + Args: + layer_debug_metrics: a dict to specify layer debug functions + {function_name_str: function} where the function accepts result of + NumericVerify Op, which is value difference between float and + dequantized op results. The function returns single scalar value. + model_debug_metrics: a dict to specify model debug functions + {function_name_str: function} where the function accepts outputs from + two models, and returns single scalar value for a metric. (e.g. + accuracy, IoU) + layer_direct_compare_metrics: a dict to specify layer debug functions + {function_name_str: function}. The signature is different from that of + `layer_debug_metrics`, and this one gets passed (original float value, + original quantized value, scale, zero point). The function's + implementation is responsible for correctly dequantize the quantized + value to compare. Use this one when comparing diff is not enough. + (Note) quantized value is passed as int8, so cast to int32 is needed. + denylisted_ops: a list of op names which is expected to be removed from + quantization. + denylisted_nodes: a list of op's output tensor names to be removed from + quantization. + fully_quantize: Bool indicating whether to fully quantize the model. + Besides model body, the input/output will be quantized as well. + Corresponding to mlir_quantize's fully_quantize parameter. 
+ + Raises: + ValueError: when there are duplicate keys + """ + self.layer_debug_metrics = layer_debug_metrics + self.model_debug_metrics = model_debug_metrics + self.layer_direct_compare_metrics = layer_direct_compare_metrics + + keys = [] + for metrics in [ + layer_debug_metrics, model_debug_metrics, layer_direct_compare_metrics + ]: + if metrics is not None: + keys.extend(metrics.keys()) + if len(keys) != len(set(keys)): + raise ValueError('Provided metrics have duplicate keys.') + + self.denylisted_ops = denylisted_ops + self.denylisted_nodes = denylisted_nodes + self.fully_quantize = fully_quantize + + +@tf_export.tf_export('lite.experimental.QuantizationDebugger') +class QuantizationDebugger: + """Debugger for Quantized TensorFlow Lite debug mode models. + + This can run the TensorFlow Lite converted models equipped with debug ops and + collect debug information. This debugger calculates statistics from + user-defined post-processing functions as well as default ones. + """ + + def __init__(self, + quant_debug_model_path: Optional[str] = None, + quant_debug_model_content: Optional[bytes] = None, + float_model_path: Optional[str] = None, + float_model_content: Optional[bytes] = None, + debug_dataset: Optional[Callable[ + [], Iterable[Sequence[np.ndarray]]]] = None, + debug_options: Optional[QuantizationDebugOptions] = None, + converter: Optional[TFLiteConverter] = None) -> None: + """Runs the TFLite debugging model with given debug options. + + Args: + quant_debug_model_path: Path to the quantized debug TFLite model file. + quant_debug_model_content: Content of the quantized debug TFLite model. + float_model_path: Path to float TFLite model file. + float_model_content: Content of the float TFLite model. + debug_dataset: a factory function that returns dataset generator which is + used to generate input samples (list of np.ndarray) for the model. The + generated elements must have same types and shape as inputs to the + model. 
+ debug_options: Debug options to debug the given model. + converter: Optional, use converter instead of quantized model. + + Raises: + ValueError: If the debugger was unable to be created. + + Attributes: + layer_statistics: results of error metrics for each NumericVerify op + results. in {layer_name: {metric_name: metric}} format. + model_statistics: results of error metrics for difference between float + and quantized models. in {metric_name: metric} format. + """ + self._data_gen = debug_dataset + self._debug_options = debug_options or QuantizationDebugOptions() + self.converter = None + self.calibrated_model = None + self.float_model = None + self._float_interpreter = None + if converter is not None: + if self._debug_options.model_debug_metrics: + old_optimizations = converter.optimizations + self.converter = self._set_converter_options_for_float(converter) + self.float_model = self.converter.convert() + converter.optimizations = old_optimizations + + self.converter = self._set_converter_options_for_calibration(converter) + self.calibrated_model = self.converter.convert() + # Converter should be already set up with all options + self._init_from_converter( + self._debug_options, + self.converter, + self.calibrated_model, + float_model=self.float_model) + else: + self._quant_interpreter = _interpreter.Interpreter( + quant_debug_model_path, + quant_debug_model_content, + experimental_preserve_all_tensors=( + self._debug_options.layer_direct_compare_metrics is not None)) + if self._debug_options.model_debug_metrics: + self._float_interpreter = _interpreter.Interpreter( + float_model_path, float_model_content) + self._initialize_stats() + + @property + def options(self) -> QuantizationDebugOptions: + return self._debug_options + + @options.setter + def options(self, options: QuantizationDebugOptions) -> None: + self._debug_options = options + if not self.converter or not self.calibrated_model: + return + self._init_from_converter( + self._debug_options, + 
self.converter, + self.calibrated_model, + float_model=self.float_model) + self._initialize_stats() + + def _initialize_stats(self): + """Helper function initializes stats.""" + # TODO(b/177749613) : Fix the dependency on tf.lite._get_ops_details() + # Following code is needed to get op's name from the output tensor index, + # since NumericVerify op only provides its quantized input tensor index. + self._defining_op = dict() + for op_info in self._quant_interpreter._get_ops_details(): # pylint: disable=protected-access + self._defining_op.update( + {tensor_idx: op_info['index'] for tensor_idx in op_info['outputs']}) + + self._numeric_verify_tensor_details = None + self._numeric_verify_op_details = None + if not self._get_numeric_verify_tensor_details(): + raise ValueError('Please check if the quantized model is in debug mode') + + self._layer_debug_metrics = _DEFAULT_LAYER_DEBUG_METRICS.copy() + if self._debug_options.layer_debug_metrics: + self._layer_debug_metrics.update(self._debug_options.layer_debug_metrics) + + self.layer_statistics = None + self.model_statistics = None + + self._metrics = metrics_stub.TFLiteMetrics() + self._metrics.increase_counter_debugger_creation() + + def _get_quantized_model(self, is_debug: bool) -> bytes: + if not self.converter: + raise ValueError('No converter found, use this function with the ' + 'converter option in the constructor.') + + return convert.mlir_quantize( + self.calibrated_model, + disable_per_channel=self.converter._experimental_disable_per_channel, # pylint: disable=protected-access + fully_quantize=self._debug_options.fully_quantize, + enable_numeric_verify=is_debug, + denylisted_ops=self._debug_options.denylisted_ops, + denylisted_nodes=self._debug_options.denylisted_nodes) + + def get_nondebug_quantized_model(self) -> bytes: + """Returns a non-instrumented quantized model. + + Convert the quantized model with the initialized converter and + return bytes for nondebug model. 
The model will not be instrumented with + numeric verification operations. + + Returns: + Model bytes corresponding to the model. + Raises: + ValueError: if converter is not passed to the debugger. + """ + return self._get_quantized_model(is_debug=False) + + def get_debug_quantized_model(self) -> bytes: + """Returns an instrumented quantized model. + + Convert the quantized model with the initialized converter and + return bytes for model. The model will be instrumented with numeric + verification operations and should only be used for debugging. + + Returns: + Model bytes corresponding to the model. + Raises: + ValueError: if converter is not passed to the debugger. + """ + return self._get_quantized_model(is_debug=True) + + def _init_from_converter(self, + options: QuantizationDebugOptions, + converter: TFLiteConverter, + calibrated_model: Optional[bytes] = None, + float_model: Optional[bytes] = None) -> None: + """Convert the model and apply options. + + Converts the quantized model and initializes a quantized model interpreter + with the quantized model. Returns a float model interpreter if float model + is provided. + + Args: + options: a QuantizationDebugOptions object. + converter: an initialized tf.lite.TFLiteConverter. + calibrated_model: Calibrated model bytes. + float_model: Float model bytes. 
+ """ + self.quant_model = convert.mlir_quantize( + calibrated_model, + disable_per_channel=converter._experimental_disable_per_channel, # pylint: disable=protected-access + fully_quantize=options.fully_quantize, + enable_numeric_verify=True, + denylisted_ops=options.denylisted_ops, + denylisted_nodes=options.denylisted_nodes) + self._quant_interpreter = _interpreter.Interpreter( + model_content=self.quant_model) + self._float_interpreter = None + if float_model is not None: + self._float_interpreter = _interpreter.Interpreter( + model_content=float_model) + + def _set_converter_options_for_float( + self, converter: TFLiteConverter) -> TFLiteConverter: + """Verify converter options and set required experimental options.""" + if converter.optimizations: + converter.optimizations = [] + return converter + + def _set_converter_options_for_calibration( + self, converter: TFLiteConverter) -> TFLiteConverter: + """Verify converter options and set required experimental options.""" + if not converter.optimizations: + raise ValueError( + 'converter object must set optimizations to lite.Optimize.DEFAULT') + if not converter.representative_dataset: + raise ValueError('converter object must set representative_dataset') + + converter.experimental_mlir_quantizer = True + converter._experimental_calibrate_only = True # pylint: disable=protected-access + return converter + + def run(self) -> None: + """Runs models and gets metrics.""" + self.layer_statistics = self._collect_layer_statistics() + if self._debug_options.model_debug_metrics: + self.model_statistics = self._collect_model_statistics() + + def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]: + """Collects layer statistics by applying layer debug metrics. + + For all data from the given RepresentativeDataset, collect statistics per + example by getting the NumericVerify op results in _quant_interpreter + and calculating layer debug metrics on the results. 
+ + Returns: + aggregated per-layer statistics of NumericVerify results. + {layer_name: {metric_name: metric}} + """ + layer_statistics = collections.defaultdict( + lambda: collections.defaultdict(list)) + + initialize = True + for tensor_data in self._data_gen(): + self._set_input_tensors(self._quant_interpreter, tensor_data, initialize) + initialize = False + + # Run the model. + self._quant_interpreter.invoke() + + # Collect the statistics of this invoke result. + for tensor_detail in self._get_numeric_verify_tensor_details(): + tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup + diffs = self._quant_interpreter.get_tensor(tensor_detail['index']) # pytype: disable=unsupported-operands # dynamic-method-lookup + for metric_name, metric_fn in self._layer_debug_metrics.items(): + layer_statistics[tensor_name][metric_name].append(metric_fn(diffs)) + + if self._debug_options.layer_direct_compare_metrics is not None: + for tensor_detail in self._get_numeric_verify_tensor_details(): + tensor_name = tensor_detail['name'] # pytype: disable=unsupported-operands # dynamic-method-lookup + op_idx = self._defining_op[tensor_detail['index']] # pytype: disable=unsupported-operands # dynamic-method-lookup + op_detail = self._quant_interpreter._get_op_details(op_idx) # pylint: disable=protected-access + q_idx, f_idx = op_detail['inputs'] + quant_input_detail = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access + q_idx, subgraph_index=0) + for (metric_name, metric_fn + ) in self._debug_options.layer_direct_compare_metrics.items(): + layer_statistics[tensor_name][metric_name].append( + metric_fn( + self._quant_interpreter.get_tensor(f_idx), + self._quant_interpreter.get_tensor(q_idx), + quant_input_detail['quantization_parameters']['scales'][0], + quant_input_detail['quantization_parameters']['zero_points'] + [0])) + + # Calculate final aggregated metrics for each layer. 
+ for metrics in layer_statistics.values(): + for metric_name in metrics: + metrics[metric_name] = np.nanmean(metrics[metric_name]) + + return layer_statistics + + def _collect_model_statistics(self) -> Dict[str, float]: + """Collects model output metrics. + + For all data from the given RepresentativeDataset, collect all model output + results from float model & quantized debug model, and calculate metrics + by using model output functions. As a result, self.model_results is filled, + + where self.model_results[model_output_function_name] = `aggregated model + output function value` (a scalar). + + Returns: + aggregated per-model output discrepancy metrics. + {metric_name: aggregated_metric} + """ + + model_statistics = collections.defaultdict(list) + + initialize = True + for tensor_data in self._data_gen(): + # Run quantized debug model and collect output results. + self._set_input_tensors(self._quant_interpreter, tensor_data, initialize) + self._quant_interpreter.invoke() + quant_tensor_data = self._get_output_tensors(self._quant_interpreter) + + # Run float model if it's initialized. + float_tensor_data = [] + if self._float_interpreter: + self._set_input_tensors( + self._float_interpreter, tensor_data, initialize) + self._float_interpreter.invoke() + float_tensor_data = self._get_output_tensors(self._float_interpreter) + + initialize = False + + # Calculate the metrics. + for (metric_name, + metric_fn) in self._debug_options.model_debug_metrics.items(): + model_statistics[metric_name].append( + metric_fn(float_tensor_data, quant_tensor_data)) + + # Calculate final aggregated metrics for each outputs. + return { + metric_name: np.mean(metric) + for metric_name, metric in model_statistics.items() + } + + def _set_input_tensors(self, interpreter: _interpreter.Interpreter, + tensor_data: Sequence[np.ndarray], + initialize: bool) -> None: + """Sets input tensors into TFLite model Interpreter. 
+ + Args: + interpreter: a tf.lite.Interpreter object with allocated tensors. + tensor_data: a list of Numpy array data. + initialize: set to true when input is first set for the interpreter, to + set input shapes and allocate tensors. + + Raises: + ValueError: when inputs can't be set, or size of provided inputs does not + match size of model inputs. + """ + input_details = interpreter.get_input_details() + if len(input_details) != len(tensor_data): + raise ValueError( + 'Number of inputs provided ({}) does not match number of inputs to ' + 'the model ({})'.format(len(tensor_data), len(input_details))) + + if initialize: + for input_detail, tensor in zip(input_details, tensor_data): + interpreter.resize_tensor_input(input_detail['index'], tensor.shape) + interpreter.allocate_tensors() + + for input_detail, tensor in zip(input_details, tensor_data): + if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8: + quant_params = _get_quant_params(input_detail) + if quant_params: + scale, zero_point = quant_params + tensor = np.round((tensor / scale) + zero_point).astype(np.int8) + interpreter.set_tensor(input_detail['index'], tensor) + + def _get_output_tensors( + self, interpreter: _interpreter.Interpreter) -> List[np.ndarray]: + """Returns output tensors of given TFLite model Interpreter. + + Args: + interpreter: a tf.lite.Interpreter object with allocated tensors. + + Returns: + a list of numpy arrays representing output tensor results. 
+ """ + + outputs = [] + for output_detail in interpreter.get_output_details(): + tensor = interpreter.get_tensor(output_detail['index']) + if output_detail['dtype'] == np.int8: + quant_params = _get_quant_params(output_detail) + if quant_params: + scale, zero_point = quant_params + tensor = ((tensor.astype(np.float32) - zero_point) * scale).astype( + np.float32) + outputs.append(tensor) + + return outputs + + def _get_numeric_verify_tensor_details(self) -> List[str]: + """Returns all names of all tensors from NumericVerify op.""" + # pylint: disable=protected-access + if not self._numeric_verify_tensor_details: + self._numeric_verify_tensor_details = [] + self._numeric_verify_op_details = {} + for op_info in self._quant_interpreter._get_ops_details(): + if op_info['op_name'] == _NUMERIC_VERIFY_OP_NAME: + self._numeric_verify_tensor_details.append( + self._quant_interpreter._get_tensor_details( + op_info['outputs'][0], subgraph_index=0)) + tensor_name = self._numeric_verify_tensor_details[-1]['name'] + self._numeric_verify_op_details[tensor_name] = op_info + # pylint: enable=protected-access + return self._numeric_verify_tensor_details + + def _get_operand_name_and_index(self, + numeric_verify_name: str) -> Tuple[str, int]: + """Gets the index and name of NumericVerify Op's quantized input tensor. + + Args: + numeric_verify_name: name of the NumericVerify op's output tensor. It has + format of `NumericVerify/{quantized_tensor_name}:{quantized_tensor_idx}` + + Returns: + Tuple of (tensor_name, tensor_idx) for quantized op's output tensor. + """ + tensor_name, tensor_idx = numeric_verify_name.rsplit(':', 1) + float_tensor_name = tensor_name[len(_NUMERIC_VERIFY_OP_NAME) + 1:] + if re.match(r'\d', float_tensor_name[-1]): + float_tensor_name = float_tensor_name[:-1] + + return (float_tensor_name, int(tensor_idx)) + + def layer_statistics_dump(self, file: IO[str]) -> None: + """Dumps layer statistics into file, in csv format. 
+ + Args: + file: file, or file-like object to write. + """ + # order of `fields` is the order of fields in csv. + fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys()) + if self._debug_options.layer_direct_compare_metrics is not None: + fields += list(self._debug_options.layer_direct_compare_metrics.keys()) + fields += ['scale', 'zero_point', 'tensor_name'] + writer = csv.DictWriter(file, fields) + writer.writeheader() + if self.layer_statistics: + for name, metrics in self.layer_statistics.items(): + data = metrics.copy() + (data['tensor_name'], _) = self._get_operand_name_and_index(name) + data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0] + data['op_name'] = self._quant_interpreter._get_op_details( # pylint: disable=protected-access + self._defining_op[data['tensor_idx']])['op_name'] + details = self._quant_interpreter._get_tensor_details( # pylint: disable=protected-access + data['tensor_idx'], subgraph_index=0) + data['scale'], data['zero_point'] = ( + details['quantization_parameters']['scales'][0], + details['quantization_parameters']['zero_points'][0]) + writer.writerow(data) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py new file mode 100644 index 0000000000000000000000000000000000000000..3dca3f62b798212ab5d589ed5c4901e072a308ef --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/lite/tools/visualize.py @@ -0,0 +1,549 @@ +#!/usr/bin/env python +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""This tool creates an html visualization of a TensorFlow Lite graph. + +Example usage: + +python visualize.py foo.tflite foo.html +""" + +import json +import os +import re +import sys +import numpy as np + +# pylint: disable=g-import-not-at-top +if not os.path.splitext(__file__)[0].endswith( + os.path.join("tflite_runtime", "visualize")): + # This file is part of tensorflow package. + from tensorflow.lite.python import schema_py_generated as schema_fb +else: + # This file is part of tflite_runtime package. 
+ from tflite_runtime import schema_py_generated as schema_fb + +# A CSS description for making the visualizer +_CSS = """ + + + + + + + + +""" + +_D3_HTML_TEMPLATE = """ + +""" + + +def TensorTypeToName(tensor_type): + """Converts a numerical enum to a readable tensor type.""" + for name, value in schema_fb.TensorType.__dict__.items(): + if value == tensor_type: + return name + return None + + +def BuiltinCodeToName(code): + """Converts a builtin op code enum to a readable name.""" + for name, value in schema_fb.BuiltinOperator.__dict__.items(): + if value == code: + return name + return None + + +def NameListToString(name_list): + """Converts a list of integers to the equivalent ASCII string.""" + if isinstance(name_list, str): + return name_list + else: + result = "" + if name_list is not None: + for val in name_list: + result = result + chr(int(val)) + return result + + +class OpCodeMapper: + """Maps an opcode index to an op name.""" + + def __init__(self, data): + self.code_to_name = {} + for idx, d in enumerate(data["operator_codes"]): + self.code_to_name[idx] = BuiltinCodeToName(d["builtin_code"]) + if self.code_to_name[idx] == "CUSTOM": + self.code_to_name[idx] = NameListToString(d["custom_code"]) + + def __call__(self, x): + if x not in self.code_to_name: + s = "" + else: + s = self.code_to_name[x] + return "%s (%d)" % (s, x) + + +class DataSizeMapper: + """For buffers, report the number of bytes.""" + + def __call__(self, x): + if x is not None: + return "%d bytes" % len(x) + else: + return "--" + + +class TensorMapper: + """Maps a list of tensor indices to a tooltip hoverable indicator of more.""" + + def __init__(self, subgraph_data): + self.data = subgraph_data + + def __call__(self, x): + html = "" + if x is None: + return html + + html += "" + for i in x: + tensor = self.data["tensors"][i] + html += str(i) + " " + html += NameListToString(tensor["name"]) + " " + html += TensorTypeToName(tensor["type"]) + " " + html += (repr(tensor["shape"]) if 
"shape" in tensor else "[]") + html += (repr(tensor["shape_signature"]) + if "shape_signature" in tensor else "[]") + "
" + html += "
" + html += repr(x) + html += "
" + return html + + +def GenerateGraph(subgraph_idx, g, opcode_mapper): + """Produces the HTML required to have a d3 visualization of the dag.""" + + def TensorName(idx): + return "t%d" % idx + + def OpName(idx): + return "o%d" % idx + + edges = [] + nodes = [] + first = {} + second = {} + pixel_mult = 200 # TODO(aselle): multiplier for initial placement + width_mult = 170 # TODO(aselle): multiplier for initial placement + for op_index, op in enumerate(g["operators"] or []): + if op["inputs"] is not None: + for tensor_input_position, tensor_index in enumerate(op["inputs"]): + if tensor_index not in first: + first[tensor_index] = ((op_index - 0.5 + 1) * pixel_mult, + (tensor_input_position + 1) * width_mult) + edges.append({ + "source": TensorName(tensor_index), + "target": OpName(op_index) + }) + if op["outputs"] is not None: + for tensor_output_position, tensor_index in enumerate(op["outputs"]): + if tensor_index not in second: + second[tensor_index] = ((op_index + 0.5 + 1) * pixel_mult, + (tensor_output_position + 1) * width_mult) + edges.append({ + "target": TensorName(tensor_index), + "source": OpName(op_index) + }) + + nodes.append({ + "id": OpName(op_index), + "name": opcode_mapper(op["opcode_index"]), + "group": 2, + "x": pixel_mult, + "y": (op_index + 1) * pixel_mult + }) + for tensor_index, tensor in enumerate(g["tensors"]): + initial_y = ( + first[tensor_index] if tensor_index in first else + second[tensor_index] if tensor_index in second else (0, 0)) + + nodes.append({ + "id": TensorName(tensor_index), + "name": "%r (%d)" % (getattr(tensor, "shape", []), tensor_index), + "group": 1, + "x": initial_y[1], + "y": initial_y[0] + }) + graph_str = json.dumps({"nodes": nodes, "edges": edges}) + + html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx) + return html + + +def GenerateTableHtml(items, keys_to_print, display_index=True): + """Given a list of object values and keys to print, make an HTML table. + + Args: + items: Items to print an array of dicts. 
+ keys_to_print: (key, display_fn). `key` is a key in the object. i.e. + items[0][key] should exist. display_fn is the mapping function on display. + i.e. the displayed html cell will have the string returned by + `mapping_fn(items[0][key])`. + display_index: add a column which is the index of each row in `items`. + + Returns: + An html table. + """ + html = "" + # Print the list of items + html += "\n" + html += "\n" + if display_index: + html += "" + for h, mapper in keys_to_print: + html += "" % h + html += "\n" + for idx, tensor in enumerate(items): + html += "\n" + if display_index: + html += "" % idx + # print tensor.keys() + for h, mapper in keys_to_print: + val = tensor[h] if h in tensor else None + val = val if mapper is None else mapper(val) + html += "\n" % val + + html += "\n" + html += "
index%s
%d%s
\n" + return html + + +def CamelCaseToSnakeCase(camel_case_input): + """Converts an identifier in CamelCase to snake_case.""" + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_case_input) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + + +def FlatbufferToDict(fb, preserve_as_numpy): + """Converts a hierarchy of FB objects into a nested dict. + + We avoid transforming big parts of the flat buffer into python arrays. This + speeds conversion from ten minutes to a few seconds on big graphs. + + Args: + fb: a flat buffer structure. (i.e. ModelT) + preserve_as_numpy: true if all downstream np.arrays should be preserved. + false if all downstream np.array should become python arrays + Returns: + A dictionary representing the flatbuffer rather than a flatbuffer object. + """ + if isinstance(fb, int) or isinstance(fb, float) or isinstance(fb, str): + return fb + elif hasattr(fb, "__dict__"): + result = {} + for attribute_name in dir(fb): + attribute = fb.__getattribute__(attribute_name) + if not callable(attribute) and attribute_name[0] != "_": + snake_name = CamelCaseToSnakeCase(attribute_name) + preserve = True if attribute_name == "buffers" else preserve_as_numpy + result[snake_name] = FlatbufferToDict(attribute, preserve) + return result + elif isinstance(fb, np.ndarray): + return fb if preserve_as_numpy else fb.tolist() + elif hasattr(fb, "__len__"): + return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb] + else: + return fb + + +def CreateDictFromFlatbuffer(buffer_data): + model_obj = schema_fb.Model.GetRootAsModel(buffer_data, 0) + model = schema_fb.ModelT.InitFromObj(model_obj) + return FlatbufferToDict(model, preserve_as_numpy=False) + + +def create_html(tflite_input, input_is_filepath=True): # pylint: disable=invalid-name + """Returns html description with the given tflite model. + + Args: + tflite_input: TFLite flatbuffer model path or model object. + input_is_filepath: Tells if tflite_input is a model path or a model object. 
+ + Returns: + Dump of the given tflite model in HTML format. + + Raises: + RuntimeError: If the input is not valid. + """ + + # Convert the model into a JSON flatbuffer using flatc (build if doesn't + # exist. + if input_is_filepath: + if not os.path.exists(tflite_input): + raise RuntimeError("Invalid filename %r" % tflite_input) + if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"): + with open(tflite_input, "rb") as file_handle: + file_data = bytearray(file_handle.read()) + data = CreateDictFromFlatbuffer(file_data) + elif tflite_input.endswith(".json"): + data = json.load(open(tflite_input)) + else: + raise RuntimeError("Input file was not .tflite or .json") + else: + data = CreateDictFromFlatbuffer(tflite_input) + html = "" + html += _CSS + html += "

TensorFlow Lite Model

" + + data["filename"] = tflite_input if input_is_filepath else ( + "Null (used model object)") # Avoid special case + + toplevel_stuff = [("filename", None), ("version", None), + ("description", None)] + + html += "\n" + for key, mapping in toplevel_stuff: + if not mapping: + mapping = lambda x: x + html += "\n" % (key, mapping(data.get(key))) + html += "
%s%s
\n" + + # Spec on what keys to display + buffer_keys_to_display = [("data", DataSizeMapper())] + operator_keys_to_display = [("builtin_code", BuiltinCodeToName), + ("custom_code", NameListToString), + ("version", None)] + + # Update builtin code fields. + for d in data["operator_codes"]: + d["builtin_code"] = max(d["builtin_code"], d["deprecated_builtin_code"]) + + for subgraph_idx, g in enumerate(data["subgraphs"]): + # Subgraph local specs on what to display + html += "
" + tensor_mapper = TensorMapper(g) + opcode_mapper = OpCodeMapper(data) + op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper), + ("builtin_options", None), + ("opcode_index", opcode_mapper)] + tensor_keys_to_display = [("name", NameListToString), + ("type", TensorTypeToName), ("shape", None), + ("shape_signature", None), ("buffer", None), + ("quantization", None)] + + html += "

Subgraph %d

\n" % subgraph_idx + + # Inputs and outputs. + html += "

Inputs/Outputs

\n" + html += GenerateTableHtml([{ + "inputs": g["inputs"], + "outputs": g["outputs"] + }], [("inputs", tensor_mapper), ("outputs", tensor_mapper)], + display_index=False) + + # Print the tensors. + html += "

Tensors

\n" + html += GenerateTableHtml(g["tensors"], tensor_keys_to_display) + + # Print the ops. + if g["operators"]: + html += "

Ops

\n" + html += GenerateTableHtml(g["operators"], op_keys_to_display) + + # Visual graph. + html += "\n" % ( + subgraph_idx,) + html += GenerateGraph(subgraph_idx, g, opcode_mapper) + html += "
" + + # Buffers have no data, but maybe in the future they will + html += "

Buffers

\n" + html += GenerateTableHtml(data["buffers"], buffer_keys_to_display) + + # Operator codes + html += "

Operator Codes

\n" + html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display) + + html += "\n" + + return html + + +def main(argv): + try: + tflite_input = argv[1] + html_output = argv[2] + except IndexError: + print("Usage: %s " % (argv[0])) + else: + html = create_html(tflite_input) + with open(html_output, "w") as output_file: + output_file.write(html) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80a7251d1b3982b19ccdc44164138de3e5ff7c77 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ec051f113b7b79232a800ec015ff606dfcdc16a5 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.pyi @@ -0,0 +1,18 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class TestClassDef: + def __init__(self) -> None: ... + def method(self) -> object: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so new file mode 100644 index 0000000000000000000000000000000000000000..a0a21b5fa34f37d5fc9171d460289ed82e8b3691 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/impl/testing/pybind_for_testing.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dd76e74055bba4c02308da5f57791117799704b278e153aef7741edbae230b2 +size 1072920 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..db275e6ed9dc9220f2dae1ade04434d9c86d410e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__init__.py @@ -0,0 +1,63 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""This module implements operators that AutoGraph overloads. + +Note that "operator" is used loosely here, and includes control structures like +conditionals and loops, implemented in functional form, using for example +closures for the body. +""" + +# Naming conventions: +# * operator names match the name usually used for the respective Python +# idiom; examples: for_stmt, list_append +# * operator arguments match either of: +# - the corresponding Python AST attribute (e.g. the condition of an if +# statement is called test) if the operator represents an AST construct +# - the names used in the Python docs, if the operator is a function (e.g. +# list_ and x for append, see +# https://docs.python.org/3.7/tutorial/datastructures.html) +# +# All operators may accept a final argument named "opts", of a type that +# subclasses namedtuple and contains any arguments that are only required +# for some specializations of the operator. 
+ +from tensorflow.python.autograph.operators.conditional_expressions import if_exp +from tensorflow.python.autograph.operators.control_flow import for_stmt +from tensorflow.python.autograph.operators.control_flow import if_stmt +from tensorflow.python.autograph.operators.control_flow import while_stmt +from tensorflow.python.autograph.operators.data_structures import list_append +from tensorflow.python.autograph.operators.data_structures import list_pop +from tensorflow.python.autograph.operators.data_structures import list_stack +from tensorflow.python.autograph.operators.data_structures import ListPopOpts +from tensorflow.python.autograph.operators.data_structures import ListStackOpts +from tensorflow.python.autograph.operators.data_structures import new_list +from tensorflow.python.autograph.operators.exceptions import assert_stmt +from tensorflow.python.autograph.operators.logical import and_ +from tensorflow.python.autograph.operators.logical import eq +from tensorflow.python.autograph.operators.logical import not_ +from tensorflow.python.autograph.operators.logical import not_eq +from tensorflow.python.autograph.operators.logical import or_ +from tensorflow.python.autograph.operators.py_builtins import float_ +from tensorflow.python.autograph.operators.py_builtins import int_ +from tensorflow.python.autograph.operators.py_builtins import len_ +from tensorflow.python.autograph.operators.py_builtins import print_ +from tensorflow.python.autograph.operators.py_builtins import range_ +from tensorflow.python.autograph.operators.slices import get_item +from tensorflow.python.autograph.operators.slices import GetItemOpts +from tensorflow.python.autograph.operators.slices import set_item +from tensorflow.python.autograph.operators.variables import ld +from tensorflow.python.autograph.operators.variables import ldu +from tensorflow.python.autograph.operators.variables import Undefined +from tensorflow.python.autograph.operators.variables import UndefinedReturnValue 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1250879ea60e350f950dd5ebfbf79a4c58abf89 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/conditional_expressions.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/conditional_expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208d592c90cf7c85cee8ed177e2fdb9f3dee777d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/conditional_expressions.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/control_flow.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/control_flow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e095fb4094eb93c66908f498f6b71109b7ef4f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/control_flow.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/data_structures.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/data_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..027ef10fe4e240203d255bb818dea6de2e687024 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/data_structures.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/exceptions.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13286cfc2e97710fae04fc96f70f78f3c16cf336 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/exceptions.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/logical.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/logical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6c5751be07c1071060e237182b4649efbee22b3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/logical.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/py_builtins.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/py_builtins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ef8ecd203c92fd02f7b1fbe342318c9e3105591 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/py_builtins.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/slices.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/slices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cc8a4d32f319217c8b84626225451d0ebfcdb3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/slices.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/variables.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/variables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d550cabae2a24013a618200a583b1642a4ad0b9a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/__pycache__/variables.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/conditional_expressions.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/conditional_expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..28fd328834c65bf1fed10793bbdb9fdcde167276 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/conditional_expressions.py @@ -0,0 +1,52 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Conditional expressions (e.g. the ternary if statement).""" + + +from tensorflow.python.autograph.operators import control_flow +from tensorflow.python.autograph.utils import tensors +from tensorflow.python.ops import cond as tf_cond + + +def if_exp(cond, if_true, if_false, expr_repr): + if tensors.is_dense_tensor(cond): + return _tf_if_exp(cond, if_true, if_false, expr_repr) + else: + return _py_if_exp(cond, if_true, if_false) + + +def _tf_if_exp(cond, if_true, if_false, expr_repr): + """Overload of if_exp that stages a TF cond.""" + # TODO(mdan): Use nonlocal once we no longer need to support py2. 
+ true_val = [] + false_val = [] + + def true_fn(): + true_val.append(if_true()) + if true_val and false_val: + control_flow.verify_single_cond_var(expr_repr, true_val[0], false_val[0]) + return true_val[0] + + def false_fn(): + false_val.append(if_false()) + if true_val and false_val: + control_flow.verify_single_cond_var(expr_repr, true_val[0], false_val[0]) + return false_val[0] + + return tf_cond.cond(cond, true_fn, false_fn) + + +def _py_if_exp(cond, if_true, if_false): + return if_true() if cond else if_false() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/control_flow.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/control_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..2069cf3342c8139d105164745eccf09bee21c7d8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/control_flow.py @@ -0,0 +1,1270 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Control flow statements: loops, conditionals, etc. + +Note: most of these operators accept pairs of get_state/set_state functions, to +capture mutations that the corresponding code blocks might make. 
These +mutations only need to be captured when staging the control flow, and they just +work when reverting to Python behavior. + +__Examples__ + +``` +while cond: + self.x += i +``` + +When the functionalized version is executed as a Python loop, it just works: + +``` +def loop_body(): + self.x += i # works as expected for Python loops +``` + +But it won't work for TF loops: + +``` +def loop_body(): + self.x += i # self.x has the wrong value! +``` + +get_state/set_state allow piping the mutations through the loop variables as +well, in effect changing the loop body: + +``` +def loop_body(self_x): + self.x = self_x # self.x now has the proper value + self.x += i # the original block + self_x = self.x # write self.x back into the loop vars + return self_x + +self_x = tf.while_loop(...) +self.x = self_x # the result is not properly captured +``` +""" + +import functools +import sys +import traceback + +import numpy as np + +from tensorflow.python.autograph.operators import py_builtins +from tensorflow.python.autograph.operators import variables +from tensorflow.python.autograph.utils import ag_logging +from tensorflow.python.autograph.utils import misc +from tensorflow.python.autograph.utils import tensors +from tensorflow.python.autograph.utils import type_registry +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors_impl +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_conversion +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import tensor_array_ops +from tensorflow.python.ops import while_loop 
+from tensorflow.python.types import distribute +from tensorflow.python.util import nest +from tensorflow.python.util import variable_utils + + +PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops. +WARN_INEFFICIENT_UNROLL = True +INEFFICIENT_UNROLL_MIN_ITERATIONS = 50000 +INEFFICIENT_UNROLL_MIN_OPS = 1 + + +# TODO(mdan): Use the custom operator pattern instead of type dispatch. +# An example of this pattern is found in the implementation of distributed +# datasets. Before it can be used though, we need to standardize the interface. + +for_loop_registry = type_registry.TypeRegistry() + + +def _is_none_or_undef(value): + """Tests whether a value is None or undefined. + + AutoGraph represents undefined symbols using special objects of type Undefined + or UndefinedReturnValue. + + Args: + value: value to test + + Returns: + Boolean + """ + return ((value is None) + or isinstance(value, variables.UndefinedReturnValue) + or isinstance(value, variables.Undefined)) + + +def _verify_tf_condition(cond, tag): + """Ensures that the condition can be used in a TF control flow.""" + extra_hint = 'to check for None, use `is not None`' + cond = tensor_conversion.convert_to_tensor_v2(cond) + + if cond.dtype != dtypes.bool: + raise ValueError( + 'condition of {} expected to be `tf.bool` scalar, got {}' + '; to use as boolean Tensor, use `tf.cast`' + '; {}'.format(tag, cond, extra_hint)) + + if cond.shape is None or cond.shape.ndims is None: + # TODO(mdan): Consider a explicit size check, if not too slow. 
+ cond = array_ops.reshape(cond, ()) + + elif cond.shape.ndims > 0: + known_dims = [d for d in cond.shape.as_list() if d is not None] + if np.prod(known_dims) > 1: + raise ValueError( + 'condition of {} expected to be `tf.bool` scalar, got {}' + '; {}'.format(tag, cond, extra_hint)) + else: + cond = array_ops.reshape(cond, ()) + + return cond + + +def verify_loop_init_vars( + init_vars, symbol_names, first_iter_vars=None, extra_message=None +): + """Ensures that all values in the state are valid to use in a TF loop. + + The init_vars may contain placeholder values derived from first_iter_vars. + + Args: + init_vars: initial loop variables (as taken before entering the loop) + symbol_names: corresponding names of the initial loop variables + first_iter_vars: loop variables after one iteration of the loop + extra_message: an extra string to append to the error message, in case of + "undefined variable" errors (see variables.Undefined) + """ + if not symbol_names: + return + if first_iter_vars is None: + first_iter_vars = (None,) * len(symbol_names) + + assert len(symbol_names) == len(init_vars) + assert len(symbol_names) == len(first_iter_vars) + for name, val, fi_val in zip(symbol_names, init_vars, first_iter_vars): + if isinstance(val, variables.UndefinedReturnValue): + if fi_val: + raise ValueError( + 'the return value from a TensorFlow loop may only be a {}; got {}' + .format(LEGAL_LOOP_TYPES, type(fi_val))) + else: + # TODO(mdan): This can be handled by removing the return value. 
+ raise NotImplementedError( + 'a return statement cannot be placed inside this TensorFlow loop;' + ' this may happen if a return statement depends on a' + ' static Python condition such as a hyperparameter') + + error_msg = None + if val is None: + error_msg = "'{}' is not allowed to be None before the loop".format(name) + elif isinstance(val, variables.Undefined): + error_msg = "'{}' must be defined before the loop".format(name) + if extra_message: + error_msg += '\n' + extra_message + + if error_msg is not None: + raise ValueError(error_msg) + + +def _is_subshape(left, right): + """Returns True if left shape is at least as specific as right shape.""" + # TODO(mdan): This code should be in TensorShape. + # Note: this is not the same as TensorShape.is_compatible_with, which is + # symmetric. + # This code also duplicates _ShapeLessThanOrEqual from control_flow_ops.py. + if right.dims is None: + return True + if left.ndims != right.ndims: + return False + for ldim, rdim in zip(left.dims, right.dims): + if rdim.value is not None and ldim.value != rdim.value: + return False + return True + + +# TODO(mdan): Remove these verifications once TF ops can properly report names. 
+def _verify_single_loop_var( + name, check_shape, init, entry, exit_, shape_invariant): + """Verifies whether the initial, entry and exit values are consistent.""" + assert entry is not None, "no TF op should set '{}' to None?".format(name) + if exit_ is None: + raise ValueError("'{}' is None at the end of the iteration.".format(name)) + + if isinstance(init, (bool, int, float, str, np.ndarray)): + init = tensor_conversion.convert_to_tensor_v2(init) + if isinstance(entry, (bool, int, float, str, np.ndarray)): + entry = tensor_conversion.convert_to_tensor_v2(entry) + if isinstance(exit_, (bool, int, float, str, np.ndarray)): + exit_ = tensor_conversion.convert_to_tensor_v2(exit_) + + if (not tensor_util.is_tf_type(entry) or + not tensor_util.is_tf_type(exit_)): + return + + # TODO(mdan): Properly account for CompositeTensors. + if (not hasattr(entry, 'dtype') or + not hasattr(exit_, 'dtype')): + return + if (not hasattr(entry, 'shape') or + not hasattr(exit_, 'shape')): + return + + if entry.dtype != exit_.dtype: + raise TypeError( + "'{}' has dtype {} before the loop, but dtype {} after one" + ' iteration'.format( + name, + entry.dtype.name, + exit_.dtype.name, + )) + if check_shape: + exit_shape = exit_.shape + if shape_invariant is None: + entry_shape = entry.shape + if not _is_subshape(exit_shape, entry_shape): + raise ValueError( + "'{}' has shape {} before the loop, but shape {} after one" + ' iteration. 
Use tf.autograph.experimental.set_loop_options to set' + ' shape invariants.'.format(name, entry_shape, exit_shape)) + else: + init_shape = init.shape + if not _is_subshape(init_shape, shape_invariant): + raise ValueError( + "'{}' has shape {} before the loop, which does not conform with" + ' the shape invariant {}.'.format(name, init_shape, + shape_invariant)) + if not _is_subshape(exit_shape, shape_invariant): + raise ValueError( + "'{}' has shape {} after one iteration, which does not conform with" + ' the shape invariant {}.'.format(name, exit_shape, shape_invariant) + ) + + +def verify_tf_loop_vars( + init_vars, + iter_entry_vars, + iter_exit_vars, + symbol_names, + opts, + check_shapes=True, +): + """Verifies loop variables for consistency.""" + if check_shapes and 'shape_invariants' in opts: + shape_invariants = opts['shape_invariants'] + else: + shape_invariants = nest.map_structure(lambda _: None, iter_entry_vars) + + assert len(symbol_names) == len(shape_invariants) + assert len(symbol_names) == len(init_vars) + assert len(symbol_names) == len(iter_entry_vars) + assert len(symbol_names) == len(iter_exit_vars) + + for i in range(len(symbol_names)): + name = symbol_names[i] + init = init_vars[i] + entry = iter_entry_vars[i] + exit_ = iter_exit_vars[i] + invariant = shape_invariants[i] + + try: + nest.assert_same_structure(init, entry, expand_composites=True) + except (ValueError, TypeError): + # `Variable`s in `init` may be implicitly converted to `Tensor`s. Convert + # `ResourceVariable`s to Tensors so tf.nest.assert_same_structure + # won't break due to type spec mismatches between `ResourceVariable`s and + # `Tensor`s. 
+ try: + init_tensors = variable_utils.convert_variables_to_tensors(init) + nest.assert_same_structure(init_tensors, entry, expand_composites=True) + except (ValueError, TypeError) as e: + raise TypeError("'{}' does not have the same nested structure after one" + ' iteration.\n\n{}'.format(name, e)) from e + + try: + nest.assert_same_structure(entry, exit_, expand_composites=True) + except (ValueError, TypeError) as e: + raise TypeError("'{}' does not have the same nested structure after one" + ' iteration.\n\n{}'.format(name, e)) from e + if invariant is not None: + try: + nest.assert_same_structure(init, invariant, expand_composites=False) + except (ValueError, TypeError) as e: + raise TypeError("'{}' does not have the same nested structure as its" + ' corresponding shape invariant.\n\n{}'.format( + name, e)) from e + + nest.map_structure( + functools.partial(_verify_single_loop_var, name, check_shapes), init, + entry, exit_, invariant) + + +def verify_single_cond_var(name, body_var, orelse_var): + """Verifies whether body_var and orelse_var are consistent.""" + if body_var is None: + raise ValueError("'{}' is None at the end of the main branch.".format(name)) + if orelse_var is None: + raise ValueError( + "'{}' is None at the end of the else branch.".format(name)) + + if isinstance(body_var, (bool, int, float, str, np.ndarray)): + body_var = tensor_conversion.convert_to_tensor_v2(body_var) + + if isinstance(orelse_var, (bool, int, float, str, np.ndarray)): + orelse_var = tensor_conversion.convert_to_tensor_v2(orelse_var) + + if (not tensor_util.is_tf_type(body_var) or + not tensor_util.is_tf_type(orelse_var)): + return + + # TODO(mdan): Properly account for CompositeTensors. 
+ if (not hasattr(body_var, 'dtype') or + not hasattr(orelse_var, 'dtype')): + return + + if body_var.dtype != orelse_var.dtype: + raise TypeError( + "'{}' has dtype {} in the main branch, but dtype {} in the else" + ' branch'.format(name, body_var.dtype.name, + orelse_var.dtype.name)) + + +def _verify_tf_cond_branch_vars(vars_, symbol_names, branch_name): + """Verifies variables output by a conditional branch for consistency.""" + for name, var_ in zip(symbol_names, vars_): + if isinstance(var_, variables.Undefined): + raise ValueError( + "'{}' must also be initialized in the {} branch".format( + name, branch_name)) + if isinstance(var_, variables.UndefinedReturnValue): + raise ValueError( + 'the {} branch must also have a return statement.'.format( + branch_name)) + + +def _verify_tf_cond_vars(body_vars, orelse_vars, symbol_names): + """Verifies variables manipulated by a conditional for consistency.""" + named_vars = zip(symbol_names, body_vars, orelse_vars) + + for name, body_var, orelse_var in named_vars: + try: + nest.assert_same_structure(body_var, orelse_var, expand_composites=True) + except (ValueError, TypeError): + # One branch of cond could be a `Tensor`, while the other branch could be + # a `ResourceVariable`. Convert `ResourceVariable`s to `Tensor`s so + # assert_same_structure won't fail. + try: + body_var_tensors = variable_utils.convert_variables_to_tensors(body_var) + orelse_var_tensors = variable_utils.convert_variables_to_tensors( + orelse_var) + nest.assert_same_structure(body_var_tensors, orelse_var_tensors, + expand_composites=True) + except (ValueError, TypeError) as e: + raise TypeError( + "'{}' must have the same nested structure in the main and else" + ' branches:\n\n{}'.format(name, str(e))) from e + nest.map_structure( + functools.partial(verify_single_cond_var, name), body_var, orelse_var) + + +def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts): + """Functional form of a for statement. 
+ + The loop operates on a state, which includes all symbols that are + variant across loop iterations, excluding the variables local to the loop. + + For example, given the loop below that calculates the geometric and + arithmetic means or some numbers: + + ``` + geo_mean = 1 + arith_mean = 0 + for i in range(n): + a = numbers[i] + geo_mean *= a + arith_mean += a + ``` + + The state is represented by the variables named geo_mean and arith_mean. The + `extra_test`, `body`, `get_state` and `set_state` functions must bind to the + original `geo_mean` and `arith_mean` symbols, using `nonlocal`. + + The inputs and outputs of the callables representing the loop blocks are not + explicit - instead, these functions must use nonlocal/global for side effects. + The inputs and outputs are instead controlled by the set_state/get_state + functions. + + Args: + iter_: The entity being iterated over. + extra_test: Callable with boolean return type. An additional loop condition. + body: Callable representing the actual loop body. + get_state: Additional callable which can capture additional state (such as + the values of composite symbols). This is only useful when staging the + loop. + set_state: Additional callable which save values captured by get_state back + into the Python environment. This is only useful when staging the loop. + symbol_names: Tuple containing names of the loop variables returned by + get_state. + opts: Optional dict of extra loop parameters. + """ + + try: + for_fn = for_loop_registry.lookup(iter_) + except LookupError: + for_fn = _py_for_stmt + + if tensor_util.is_tf_type(iter_): + if tensors.is_range_tensor(iter_): + for_fn = _tf_range_for_stmt + else: + for_fn = _known_len_tf_for_stmt + elif isinstance(iter_, distribute.Iterator): + for_fn = _tf_iterator_for_stmt + elif isinstance(iter_, distribute.Iterable): + # TODO(b/162250181): Use _tf_iterator_for_stmt(iter(iter_)... 
+ for_fn = _tf_distributed_iterable_for_stmt + + for_fn(iter_, extra_test, body, get_state, set_state, symbol_names, opts) + + +def _py_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts +): + """Overload of for_stmt that executes a Python for loop.""" + del get_state, set_state, symbol_names, opts + + if __debug__: + checker = _PythonLoopChecker() + before_iteration = checker.before_iteration + after_iteration = checker.after_iteration + before_iteration() + + original_body = body + def protected_body(protected_iter): + original_body(protected_iter) + after_iteration() + before_iteration() + body = protected_body + + if extra_test is not None: + def guarded_extra_test(): + extra_test_result = extra_test() + try: + # Note: Using try/except and not tensor_util.is_tf_type to avoid + # performance degradation. + return bool(extra_test_result) + except errors_impl.OperatorNotAllowedInGraphError as e: + ag_logging.log( + 1, + 'Caught error while evaluating loop stop condition', + exc_info=True) + # TODO(mdan): We can pass the location of extra_test and show it here. + raise NotImplementedError( + 'break and return statements which depend on a TF condition are not' + ' supported in Python for loops. Did you intend to make it a TF' + ' loop?\nSee ' + 'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/' + 'python/autograph/g3doc/reference/limitations.md' + '#consistency-of-control-flow-types for more info.') from e + + if guarded_extra_test(): + for target in iter_: + body(target) + if not guarded_extra_test(): + break + + else: + for target in iter_: + body(target) + + +def _add_max_iterations_hint(opts, n): + # TODO(b/159186914): Remove the safeguard, and always set maximum_iterations. 
+ if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()): + opts['maximum_iterations'] = n + + +def _known_len_tf_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts): + """Overload of for_stmt that iterates over TF entities that admit a length.""" + n = py_builtins.len_(iter_) + + # TODO(b/117628877): Revisit performance once XLA has the necessary support. + # Note: using a TensorArray creates an extra copy, but can calculate + # gradients more efficiently than StridedSlice. + ta = tensor_array_ops.TensorArray(iter_.dtype, size=n) + iter_ = ta.unstack(iter_) + + iterate_index = 0 + + def aug_get_state(): + return (iterate_index,) + get_state() + + def aug_set_state(aug_loop_vars): + nonlocal iterate_index + # TODO(b/171479293): Drop the lint override. + iterate_index, *loop_vars = aug_loop_vars # pylint:disable=unused-variable + # The iteration index is not "output" by the for loop. If the iteration index + # is used outside the loop, it will appear in the loop vars separately. 
+ set_state(loop_vars) + + def aug_body(): + nonlocal iterate_index + body(iter_.read(iterate_index)) + iterate_index += 1 + + def aug_test(): + main_test = iterate_index < n + if extra_test is not None: + return tf_cond.cond(main_test, extra_test, lambda: False) + return main_test + + _add_max_iterations_hint(opts, n) + + _tf_while_stmt( + aug_test, + aug_body, + aug_get_state, + aug_set_state, + ('',) + symbol_names, + opts, + ) + + +def _tf_range_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts): + """Overload of for_stmt that iterates over a TF range (and elides it).""" + start, limit, delta = iter_.op.inputs + + iterate = start + + def _value_or(name, var, default): + if (name == opts['iterate_names'] and isinstance(var, variables.Undefined)): + return default + return var + + def aug_get_state(): + state_vars = get_state() + state_vars = tuple( + _value_or(name, var, iterate) + for name, var in zip(symbol_names, state_vars)) + return (iterate,) + state_vars + + def aug_set_state(aug_loop_vars): + nonlocal iterate + # TODO(b/171479293): Drop the lint override. + iterate, *loop_vars = aug_loop_vars # pylint:disable=unused-variable + # The iteration index is not "output" by the for loop. If the iterate + # is used outside the loop, it will appear in the loop vars separately. + set_state(loop_vars) + + def aug_body(): + nonlocal iterate + body(iterate) + iterate += delta + + def aug_test(): + # TODO(b/159713842): Remove once constant folding works. 
+ const_delta = tensor_util.constant_value(delta) + if const_delta is not None: + if const_delta >= 0: + main_test = iterate < limit + else: + main_test = iterate > limit + else: + main_test = math_ops.logical_or( + math_ops.logical_and(delta >= 0, iterate < limit), + math_ops.logical_and(delta < 0, iterate > limit)) + + if extra_test is not None: + main_test = tf_cond.cond(main_test, extra_test, lambda: False) + return main_test + + _add_max_iterations_hint( + opts, + math_ops.cast(misc.get_range_len(start, limit, delta), dtypes.int32)) + + _tf_while_stmt( + aug_test, + aug_body, + aug_get_state, + aug_set_state, + ('',) + symbol_names, + opts) + + +def _tf_iterator_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts): + """Overload of for_stmt that iterates over TF Iterators. See for_loop.""" + symbol_names = ('',) + symbol_names + has_next = True + + def aug_get_state(): + return (has_next,) + get_state() + + def aug_set_state(aug_loop_vars): + nonlocal has_next + # TODO(b/171479293): Drop the lint override. + has_next, *loop_vars = aug_loop_vars # pylint:disable=unused-variable + set_state(loop_vars) + + init_vars = aug_get_state() + verify_loop_init_vars(init_vars, symbol_names) + + def aug_body(): + """Main body passed to _tf_while_stmt.""" + nonlocal has_next + opt_iterate = iter_.get_next_as_optional() + has_next = opt_iterate.has_value() + loop_vars = aug_get_state() # updated by set_state() in _tf_while_loop. + + def main_path(): + body(opt_iterate.get_value()) + new_loop_vars = aug_get_state() + # Note: this verification duplicates the one performed in tf_while_stmt, + # but needs to be done earlier to prevent the tf.cond from blowing up + # first. + verify_tf_loop_vars( + init_vars, loop_vars, new_loop_vars, symbol_names, opts) + return new_loop_vars + + def noop_path(): + return loop_vars + + # TODO(mdan): If tf.while_loop supported Optional, this could be avoided. 
+ # Calling set_state so that get_state() _tf_while_loop sees the conditional + # tensors. + aug_set_state( + tf_cond.cond(has_next, main_path, noop_path)) + + def aug_test(): + # This value takes a complicated path to get here: + # prev_iteration_body -> get_state -> tf.while_loop (as loop var) + # -> current_iteration_body -> set_state -> has_next + main_test = has_next + if extra_test is not None: + return tf_cond.cond(main_test, extra_test, lambda: False) + return main_test + + _tf_while_stmt( + aug_test, + aug_body, + aug_get_state, + aug_set_state, + symbol_names, + opts) + + +def _tf_distributed_iterable_for_stmt( + iter_, extra_test, body, get_state, set_state, symbol_names, opts): + """Overload of for_stmt that iterates over TF distributed datasets.""" + + if extra_test is not None: + raise NotImplementedError( + 'break and return statements are not yet supported in ' + 'for ... in distributed input loops.') + + init_vars = get_state() + verify_loop_init_vars(init_vars, symbol_names) + + if 'shape_invariants' in opts: + opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list( + opts['shape_invariants'], init_vars) + + def reduce_body(loop_vars, iterate): + set_state(loop_vars) + body(iterate) + new_loop_vars = get_state() + verify_tf_loop_vars( + init_vars, loop_vars, new_loop_vars, symbol_names, opts) + return new_loop_vars + + set_state(iter_.reduce(init_vars, reduce_body)) + + +def while_stmt(test, body, get_state, set_state, symbol_names, opts): + """Functional form of a while statement. + + The loop operates on a so-called state, which includes all symbols that are + variant across loop iterations. In what follows we refer to state as either + a tuple of entities that represent an actual state, or a list of arguments + of the corresponding types. + + The inputs and outputs of the callables representing the loop blocks are not + explicit - instead, these functions must use nonlocal/global for side effects. 
+ The inputs and outputs are instead controlled by the set_state/get_state + functions. + + Args: + test: Callable with boolean return type. The loop condition. + body: Callable representing the actual loop body. + get_state: Additional callable which can capture additional state (such as + the values of composite symbols). This is only useful when staging the + loop. + set_state: Additional callable which save values captured by get_state back + into the Python environment. This is only useful when staging the loop. + symbol_names: Tuple containing the names of all loop variables. + opts: Optional dict of extra loop parameters. + + Returns: + Tuple containing the final state. + """ + + # Evaluate the initial test once in order to do the dispatch. The evaluation + # is isolated to minimize unwanted side effects. + # TODO(mdan): Do a full iteration - some state types might lower to Tensor. + with func_graph.FuncGraph('tmp').as_default(): + init_test = test() + + # TensorFlow: Multiple evaluations are acceptable in this case, so we're fine + # with the re-evaluation of `test` that `_tf_while_stmt` will make. + if tensors.is_dense_tensor(init_test): + _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts) + return + + # Normal Python: We already consumed one evaluation of `test`; consistently, + # unroll one iteration before dispatching to a normal loop. + # TODO(mdan): Push the "init_test" value via opts into _py_while_stmt? + if not init_test: + return + body() + + _py_while_stmt(test, body, get_state, set_state, opts) + + +class _PythonLoopChecker(object): + """Verifies Python loops for TF-specific limits.""" + + __slots__ = ( + 'iterations', + 'check_inefficient_unroll', + 'check_op_count_after_iteration', + 'ops_before_iteration', + ) + + def __init__(self): + self.iterations = 1 + self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL + + # Triggered when we decided to test the op counts. 
+ self.check_op_count_after_iteration = False + + def _get_ops(self): + return set(ops.get_default_graph().get_operations()) + + def _check_unroll_limits(self): + if self.iterations > PYTHON_MAX_ITERATIONS: + raise ValueError('iteration limit exceeded') + + def _stop_checking_inefficient_unroll(self): + self.check_inefficient_unroll = False + self.check_op_count_after_iteration = False + self.ops_before_iteration = None + + def _verify_inefficient_unroll(self): + """Checks for possibly-inefficient creation of ops in a Python loop.""" + assert self.ops_before_iteration is not None + ops_after_iteration = self._get_ops() + new_ops = tuple( + op for op in ops_after_iteration if op not in self.ops_before_iteration) + + if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS: + return False + + ag_logging.warning( + 'Large unrolled loop detected. Did you mean to use a TF loop?' + ' The following ops were created after iteration %s: %s' + '\nSee' + ' https://github.com/tensorflow/tensorflow/blob/master/' + 'tensorflow/python/autograph/g3doc/reference/common_errors.md' + '#warning-large-unrolled-loop-detected' + '\n' + 'Location:' + '\n%s' + '', self.iterations, new_ops, '\n'.join(traceback.format_stack())) + return True + + def before_iteration(self): + """Called before each iteration in a Python loop.""" + if (self.check_inefficient_unroll and + self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS): + self.ops_before_iteration = self._get_ops() + self.check_op_count_after_iteration = True + + def after_iteration(self): + """Called after each iteration in a Python loop.""" + self.iterations += 1 + + self._check_unroll_limits() + + if self.check_op_count_after_iteration: + did_warn = self._verify_inefficient_unroll() + if did_warn: + self._stop_checking_inefficient_unroll() # Only warn once. + elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3: + # Once deciding to check the op counts, only do it for a few iterations. 
+ self._stop_checking_inefficient_unroll() + + +def _py_while_stmt(test, body, get_state, set_state, opts): + """Overload of while_stmt that executes a Python while loop.""" + del opts, get_state, set_state + + if __debug__: + checker = _PythonLoopChecker() + before_iteration = checker.before_iteration + after_iteration = checker.after_iteration + before_iteration() + + original_body = body + def protected_body(): + original_body() + after_iteration() + before_iteration() + body = protected_body + + def guarded_test(): + test_result = test() + try: + # Note: Using try/except and not tensor_util.is_tf_type to avoid + # performance degradation. + return bool(test_result) + except errors_impl.OperatorNotAllowedInGraphError as e: + ag_logging.log( + 1, + 'Caught error while evaluating while loop condition', + exc_info=True) + # TODO(mdan): distinguish between these two cases. + raise NotImplementedError( + 'The condition of while loop started as non-Tensor, then changed to' + ' Tensor. This may happen either because variables changed type, or' + ' when a break or return statement inside the loop depends on a' + ' Tensor condition. In both cases, changing to a TF loop should' + ' remove the error.\nSee ' + 'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/' + 'python/autograph/g3doc/reference/limitations.md' + '#consistency-of-control-flow-types for more info.') from e + while guarded_test(): + body() + + +def _shape_invariants_mapping_to_positional_list(mapping, keys): + # The keys are not expected to be hashable. + mapping = {id(k): (k, v) for k, v in mapping} + result = [] + for k in keys: + map_key, map_val = mapping.get(id(k), (None, None)) + result.append( + map_val if map_key is k else nest.map_structure(lambda _: None, k)) + return tuple(result) + + +# Textual description of what a legal TF loop variable is. This description +# summarizes types that _placeholder_value below can handle. Keep the two +# together and in sync. 
+LEGAL_LOOP_TYPES = 'Tensor, int, float, bool or a list, tuple or dict thereof' + + +def _placeholder_value(like, shape_invariant, original=None): + """Constructs a (dummy) placeholder value for a loop-initialized variable. + + Args: + like: Any object. The value created by the first iteration of the loop. If a + Python scalar, the placeholder will be the zero value of that type. If a + Tensor, the placeholder will be a zero tensor of matching shape and dtype. + If a list, dict or tuple, the placeholder will be an identical structure + of placeholders. + shape_invariant: The shape invariant specified by the user (or None, if + nothing was specified) for the respective variable. + original: Any object. The value of the variable prior to entering the loop. + Typically, this is one of the special "Undefined" value, because that's + when a placeholder is needed. + + Returns: + Either a zero value of structure, shape and dtype matching 'like', or + 'original', if no such zero value could be created. + """ + if like is None: + return original, None + + elif isinstance(like, (variables.Undefined, variables.UndefinedReturnValue)): + return original, None + + elif isinstance(like, (int, float, bool)): + return type(like)(0), None + + elif tensor_util.is_tf_type(like): + + like_shape = shape_invariant if shape_invariant is not None else like.shape + if like_shape is None or like_shape.rank is None: + return array_ops.zeros((), like.dtype), like_shape + + # If the shape contains dynamic values, set the corresponding starting + # dimension to either zero or what the shape invariant specified. 
+ placeholder_shape = [] + has_dynamic_dims = False + for s, i in zip(like.shape, like_shape): + if i is None: + like_dim = 0 + elif isinstance(i, tensor_shape.Dimension): + if i.value is None: + like_dim = 0 + else: + like_dim = i.value + else: + like_dim = i + + if s is None: + placeholder_shape.append(like_dim) + has_dynamic_dims = True + elif isinstance(s, tensor_shape.Dimension): + if s.value is None: + placeholder_shape.append(like_dim) + has_dynamic_dims = True + else: + placeholder_shape.append(s.value) + else: + placeholder_shape.append(s) + + if has_dynamic_dims: + invariant = like_shape + else: + invariant = None + + return array_ops.zeros(placeholder_shape, like.dtype), invariant + + elif isinstance(like, (list, tuple, dict)): + if shape_invariant is None: + zipped = nest.map_structure(lambda v: _placeholder_value(v, None), + nest.flatten(like)) + else: + zipped = nest.map_structure(_placeholder_value, nest.flatten(like), + nest.flatten(shape_invariant)) + vals, invars = zip(*zipped) + return (nest.pack_sequence_as(like, + vals), nest.pack_sequence_as(like, invars)) + + # This is to be caught by _try_handling_undefineds, to give more context. + raise TypeError( + "Found an unsupported type '{}' while creating placeholder for {}." + ' Supported types include Tensor, int, float, bool, list, tuple or dict.' + .format(type(like).__name__, like)) + + +def _try_handling_undefineds(body, get_state, set_state, init_vars, nulls, + shape_invariants, symbol_names): + """Makes a best-effort attempt to substitute undefineds with placeholders. + + Note: this substitution requires two things to happen: + 1. the types of loop variables could be inferred (usually by staging one + iteration) + 2. these types could be replaced by placeholders (e.g. zero values, for + tensors). + + Args: + body: a function representing the loop body. See while_stmt. + get_state: state getter for the loop statement. See while_stmt. + set_state: state getter for the loop statement. 
See while_stmt. + init_vars: loop variables before entering the loop. See while_stmt. + nulls: list of boolean flags indicating whether the corresponding loop var + is None or undefined. + shape_invariants: user-specified shape invariant for each loop variable. + symbol_names: list of loop variable names. See while_stmt. + + Returns: + A tuple (success, new_init_vars, extra_shape_invariants, failure_message): + * success is a boolean flag indicating + whether types could be successfully inferred (step 1 above) + * new_init_vars contains the loop vars, with None or undefined values + replaced by default values, where possible (step 2 above) + * extra_shape_invariants contains shape invariants that would be needed + by while_stmt, for instance if the placeholder values had a shape + different from the corresponding loop outputs + """ + state_modified = False + first_iter_vars = None + failure_message = None + + try: + # Stage an iteration of the loop body in a temporary graph. + with func_graph.FuncGraph('tmp').as_default(): + # This call to set_state helps report nicer error messages when symbols + # are inconsistently used. + # Another complication is that non_tensor values will be autocast to + # Tensor by while_loop, and their static value lost. So we need to account + # that here. + def autocast_to_tensor(v): + if isinstance( + v, (int, float, bool, str, list, tuple, np.ndarray, np.generic)): + init_val = tensor_conversion.convert_to_tensor_v2(v) + return array_ops.placeholder(init_val.dtype, init_val.shape) + return v + autocast_init_vars = nest.map_structure(autocast_to_tensor, init_vars) + set_state(autocast_init_vars) + state_modified = True + + body() + first_iter_vars = get_state() + + # Note: the actual placeholder value doesn't matter, because as the + # staging proved, it will be replaced by an actual value before being + # read. 
+ inits_and_invariants = tuple( + (_placeholder_value(iv, i, v) if n else (v, None)) + for v, n, iv, i in zip(init_vars, nulls, first_iter_vars, + shape_invariants)) + init_vars, extra_shape_invariants = zip(*inits_and_invariants) + success = True + + except (UnboundLocalError, TypeError, ValueError, KeyError): + ag_logging.log(1, 'Caught error while staging loop body', exc_info=True) + # Fall back to the old functionality. It will likely result in an input + # validation failure. + exc = sys.exc_info() + failure_message = ( + 'Note: AutoGraph tried to define it automatically, but ran into a' + ' {}: {}'.format(exc[0].__name__, exc[1])) + + finally: + if state_modified: + set_state(init_vars) + + # This check runs regardless, in case we captured non-Tensor inputs. + verify_loop_init_vars( + init_vars, symbol_names, first_iter_vars, extra_message=failure_message) + + return success, init_vars, extra_shape_invariants + + +def _runtime_zero_iterations_errmsg(symbol_names, nulls, init_vars): + """Creates an error message asking for the loop to iterate at least once.""" + var_names = [] + for sn, n, v in zip(symbol_names, nulls, init_vars): + if not n: + continue + if isinstance(v, variables.UndefinedReturnValue): + var_names.append('the function return value') + else: + var_names.append(sn) + var_names = ', '.join(var_names) + return 'loop must iterate at least once to initialize {}'.format(var_names) + + +def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts): + """Overload of while_stmt that stages a TF while_stmt.""" + init_vars = get_state() + orig_init_vars = init_vars + + nulls = tuple(_is_none_or_undef(v) for v in init_vars) + if any(nulls): + shape_invars_by_init_vals = { + id(v): i for v, i in opts.get('shape_invariants', ()) + } + shape_invariants = tuple( + shape_invars_by_init_vals.get(id(v), None) for v in orig_init_vars) + (require_one_iteration, init_vars, + extra_shape_invariants) = _try_handling_undefineds(body, get_state, + 
set_state, init_vars, + nulls, shape_invariants, + symbol_names) + else: + require_one_iteration = False + + if require_one_iteration: + merged_shape_invariants = dict(shape_invars_by_init_vals) + # This has two roles: + # 1. Shape invariants are remapped from the old init vars to the new ones. + # 2. Any new shape invariants created by the init vars are kept, but only + # if the user didn't already specify some. + for v, nv, ni in zip(orig_init_vars, init_vars, extra_shape_invariants): + merged_invariant = merged_shape_invariants.get(id(v), ni) + if merged_invariant is not None: + merged_shape_invariants[id(nv)] = merged_invariant + merged_shape_invariants = tuple((nv, merged_shape_invariants[id(nv)]) + for nv in init_vars + if id(nv) in merged_shape_invariants) + if merged_shape_invariants: + opts = dict(**opts) + opts['shape_invariants'] = merged_shape_invariants + + def aug_test(*loop_vars): + if require_one_iteration: + loop_vars = loop_vars[1:] + + set_state(loop_vars) + return _verify_tf_condition(test(), 'while loop') + + def aug_body(*loop_vars): + if require_one_iteration: + loop_vars = loop_vars[1:] + + set_state(loop_vars) + body() + new_loop_vars = get_state() + verify_tf_loop_vars( + init_vars, loop_vars, new_loop_vars, symbol_names, opts) + + if require_one_iteration: + new_loop_vars = (True,) + new_loop_vars + + return new_loop_vars + + if 'shape_invariants' in opts: + opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list( + opts['shape_invariants'], init_vars) + + while_loop_opts = dict(opts) + while_loop_opts.pop('iterate_names', None) + + # Non-v2 while_loop unpacks the results when there is only one return value. + # This enforces consistency across versions. 
+ while_loop_opts['return_same_structure'] = True + + if require_one_iteration: + aug_init_vars = (False,) + init_vars + if 'shape_invariants' in while_loop_opts: + while_loop_opts['shape_invariants'] = ( + (None,) + while_loop_opts['shape_invariants']) + else: + aug_init_vars = init_vars + + final_loop_vars = while_loop.while_loop(aug_test, aug_body, aug_init_vars, + **while_loop_opts) + + if require_one_iteration: + with ops.control_dependencies([ + control_flow_assert.Assert(final_loop_vars[0], [ + _runtime_zero_iterations_errmsg(symbol_names, nulls, orig_init_vars) + ]) + ]): + final_loop_vars = nest.map_structure( + lambda v: (array_ops.identity(v) if tensor_util.is_tf_type(v) else v), + final_loop_vars[1:], + ) + + set_state(final_loop_vars) + + +def if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts): + """Functional form of an if statement. + + The conditional operates on a state, which includes all symbols whose values + are a function of the branch taken. + + For example, given the code below that calculates the abs function: + + ``` + x = 1 + if x > 0: + x = -x + ``` + + The state is represented by the variable `x`. The `body, `orelse` and + `set_state` functions must bind to the original `x` symbol, using `nonlocal`. + + The inputs and outputs of the callables representing the loop blocks are not + explicit - instead, these functions must use nonlocal/global for side effects. + The inputs and outputs are instead controlled by the set_state/get_state + functions. + + Args: + cond: Boolean. + body: Callable representing the main block of the conditional. + orelse: Callable representing the else block of the conditional. + get_state: Function that returns a tuple containing the values of all + composite symbols modified within the conditional. This allows access to + state that branches may mutate through side effects. This function is not + needed and should not be called when dispatching to code matching Python's + default semantics. 
This is useful for checkpointing to avoid unintended + side-effects when staging requires evaluating all code-paths. + set_state: Function to set the values of all composite symbols modified + within the conditional. This is the complement to get_state, used to + restore checkpointed values. The single argument a tuple containing values + for each composite symbol that may be modified in a branch of the + conditional. The is usually the result of a call to get_state. + symbol_names: Tuple containing basic loop var names. + nouts: Number of variables output by the statement. Vars which are not + outputs will not be passed through staged control flow such as tf.cond. + This includes variables that are defined before the conditional, but are + not used after it. + """ + # Note: tf.cond doesn't support SparseTensor. + if tensors.is_dense_tensor(cond): + _tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts) + else: + _py_if_stmt(cond, body, orelse) + + +def _tf_if_stmt( + cond, body, orelse, get_state, set_state, symbol_names, nouts): + """Overload of if_stmt that stages a TF cond.""" + cond = _verify_tf_condition(cond, 'if statement') + + if not nouts: + prev_get_state, prev_set_state = get_state, set_state + # Control flow V1 wants at least one output. + get_state = lambda: (0,) + prev_get_state() + set_state = lambda v: prev_set_state(v[1:]) + symbol_names += ('',) + nouts = 1 + + init_vars = get_state() + + # TODO(mdan): Use nonlocal once we no longer need to support py2. 
+ new_body_vars_ = [None] + new_orelse_vars_ = [None] + + def aug_body(): + set_state(init_vars) + body() + new_body_vars = get_state() + new_body_vars = new_body_vars[:nouts] + new_body_vars_[0] = new_body_vars + _verify_tf_cond_branch_vars(new_body_vars, symbol_names, 'main') + if new_orelse_vars_[0] is not None: + _verify_tf_cond_vars(new_body_vars, new_orelse_vars_[0], symbol_names) + return new_body_vars + + def aug_orelse(): + set_state(init_vars) + orelse() + new_orelse_vars = get_state() + new_orelse_vars = new_orelse_vars[:nouts] + new_orelse_vars_[0] = new_orelse_vars + _verify_tf_cond_branch_vars(new_orelse_vars, symbol_names, 'else') + if new_body_vars_[0] is not None: + _verify_tf_cond_vars(new_body_vars_[0], new_orelse_vars, symbol_names) + return new_orelse_vars + + final_cond_vars = tf_cond.cond( + cond, aug_body, aug_orelse, strict=True) + final_cond_vars = final_cond_vars + init_vars[nouts:] + + set_state(final_cond_vars) + + +def _py_if_stmt(cond, body, orelse): + """Overload of if_stmt that executes a Python if statement.""" + return body() if cond else orelse() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/data_structures.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/data_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..375d3179ab7291a4ef2ce1aacfc6b005c1113d3a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/data_structures.py @@ -0,0 +1,347 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operators specific to data structures: list append, subscripts, etc.""" + +import collections + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import list_ops +from tensorflow.python.ops import tensor_array_ops + + +# TODO(mdan): Once control flow supports objects, repackage as a class. + + +def new_list(iterable=None): + """The list constructor. + + Args: + iterable: Optional elements to fill the list with. + + Returns: + A list-like object. The exact return value depends on the initial elements. + """ + if iterable: + elements = tuple(iterable) + else: + elements = () + + if elements: + # When the list contains elements, it is assumed to be a "Python" lvalue + # list. 
+ return _py_list_new(elements) + return tf_tensor_list_new(elements) + + +def tf_tensor_array_new(elements, element_dtype=None, element_shape=None): + """Overload of new_list that stages a Tensor list creation.""" + elements = tuple(ops.convert_to_tensor(el) for el in elements) + + all_dtypes = set(el.dtype for el in elements) + if len(all_dtypes) == 1: + inferred_dtype, = tuple(all_dtypes) + if element_dtype is not None and element_dtype != inferred_dtype: + raise ValueError( + 'incompatible dtype; specified: {}, inferred from {}: {}'.format( + element_dtype, elements, inferred_dtype)) + elif len(all_dtypes) > 1: + raise ValueError( + 'TensorArray requires all elements to have the same dtype:' + ' {}'.format(elements)) + else: + if element_dtype is None: + raise ValueError('dtype is required to create an empty TensorArray') + + all_shapes = set(tuple(el.shape.as_list()) for el in elements) + if len(all_shapes) == 1: + inferred_shape, = tuple(all_shapes) + if element_shape is not None and element_shape != inferred_shape: + raise ValueError( + 'incompatible shape; specified: {}, inferred from {}: {}'.format( + element_shape, elements, inferred_shape)) + elif len(all_shapes) > 1: + raise ValueError( + 'TensorArray requires all elements to have the same shape:' + ' {}'.format(elements)) + # TODO(mdan): We may want to allow different shapes with infer_shape=False. 
+ else: + inferred_shape = None + + if element_dtype is None: + element_dtype = inferred_dtype + if element_shape is None: + element_shape = inferred_shape + + l = tensor_array_ops.TensorArray( + dtype=element_dtype, + size=len(elements), + dynamic_size=True, + infer_shape=(element_shape is None), + element_shape=element_shape) + for i, el in enumerate(elements): + l = l.write(i, el) + return l + + +def tf_tensor_list_new(elements, element_dtype=None, element_shape=None): + """Overload of new_list that stages a Tensor list creation.""" + if tensor_util.is_tf_type(elements): + if element_shape is not None: + raise ValueError( + 'element shape may not be specified when creating list from tensor') + element_shape = array_ops.shape(elements)[1:] + l = list_ops.tensor_list_from_tensor(elements, element_shape=element_shape) + return l + + elements = tuple(ops.convert_to_tensor(el) for el in elements) + + all_dtypes = set(el.dtype for el in elements) + if len(all_dtypes) == 1: + inferred_dtype = tuple(all_dtypes)[0] + if element_dtype is not None and element_dtype != inferred_dtype: + raise ValueError( + 'incompatible dtype; specified: {}, inferred from {}: {}'.format( + element_dtype, elements, inferred_dtype)) + elif all_dtypes: + # Heterogeneous lists are ok. + if element_dtype is not None: + raise ValueError( + 'specified dtype {} is inconsistent with that of elements {}'.format( + element_dtype, elements)) + inferred_dtype = dtypes.variant + else: + inferred_dtype = dtypes.variant + + all_shapes = set(tuple(el.shape.as_list()) for el in elements) + if len(all_shapes) == 1: + inferred_shape = array_ops.shape(elements[0]) + if element_shape is not None and element_shape != inferred_shape: + raise ValueError( + 'incompatible shape; specified: {}, inferred from {}: {}'.format( + element_shape, elements, inferred_shape)) + elif all_shapes: + # Heterogeneous lists are ok. 
+ if element_shape is not None: + raise ValueError( + 'specified shape {} is inconsistent with that of elements {}'.format( + element_shape, elements)) + inferred_shape = constant_op.constant(-1) # unknown shape, by convention + else: + inferred_shape = constant_op.constant(-1) # unknown shape, by convention + + if element_dtype is None: + element_dtype = inferred_dtype + if element_shape is None: + element_shape = inferred_shape + + element_shape = ops.convert_to_tensor(element_shape, dtype=dtypes.int32) + l = list_ops.empty_tensor_list( + element_shape=element_shape, element_dtype=element_dtype) + for el in elements: + l = list_ops.tensor_list_push_back(l, el) + return l + + +def _py_list_new(elements): + """Overload of new_list that creates a Python list.""" + return list(elements) + + +def list_append(list_, x): + """The list append function. + + Note: it is unspecified where list_ will be mutated or not. If list_ is + a TensorFlow entity, it will not be typically mutated. If list_ is a plain + list, it will be. In general, if the list is mutated then the return value + should point to the original entity. + + Args: + list_: An entity that supports append semantics. + x: The element to append. + + Returns: + Same as list_, after the append was performed. + + Raises: + ValueError: if list_ is not of a known list-like type. 
+ """ + if isinstance(list_, tensor_array_ops.TensorArray): + return _tf_tensorarray_append(list_, x) + elif tensor_util.is_tf_type(list_): + if list_.dtype == dtypes.variant: + return _tf_tensor_list_append(list_, x) + else: + raise ValueError( + 'tensor lists are expected to be Tensors with dtype=tf.variant,' + ' instead found %s' % list_) + else: + return _py_list_append(list_, x) + + +def _tf_tensor_list_append(list_, x): + """Overload of list_append that stages a Tensor list write.""" + def empty_list_of_elements_like_x(): + tensor_x = ops.convert_to_tensor(x) + return list_ops.empty_tensor_list( + element_shape=array_ops.shape(tensor_x), + element_dtype=tensor_x.dtype) + + list_ = cond.cond( + list_ops.tensor_list_length(list_) > 0, + lambda: list_, + empty_list_of_elements_like_x, + ) + return list_ops.tensor_list_push_back(list_, x) + + +def _tf_tensorarray_append(list_, x): + """Overload of list_append that stages a TensorArray write.""" + return list_.write(list_.size(), x) + + +def _py_list_append(list_, x): + """Overload of list_append that executes a Python list append.""" + # Revert to the original call. + list_.append(x) + return list_ + + +class ListPopOpts( + collections.namedtuple('ListPopOpts', ('element_dtype', 'element_shape'))): + pass + + +def list_pop(list_, i, opts): + """The list pop function. + + Note: it is unspecified where list_ will be mutated or not. If list_ is + a TensorFlow entity, it will not be typically mutated. If list_ is a plain + list, it will be. In general, if the list is mutated then the return value + should point to the original entity. + + Args: + list_: An entity that supports pop semantics. + i: Optional index to pop from. May be None. + opts: A ListPopOpts. + + Returns: + Tuple (x, out_list_): + out_list_: same as list_, after the removal was performed. + x: the removed element value. + + Raises: + ValueError: if list_ is not of a known list-like type or the operation is + not supported for that type. 
+ """ + assert isinstance(opts, ListPopOpts) + + if isinstance(list_, tensor_array_ops.TensorArray): + raise ValueError('TensorArray does not support item removal') + elif tensor_util.is_tf_type(list_): + if list_.dtype == dtypes.variant: + return _tf_tensor_list_pop(list_, i, opts) + else: + raise ValueError( + 'tensor lists are expected to be Tensors with dtype=tf.variant,' + ' instead found %s' % list_) + else: + return _py_list_pop(list_, i) + + +def _tf_tensor_list_pop(list_, i, opts): + """Overload of list_pop that stages a Tensor list pop.""" + if i is not None: + raise NotImplementedError('tensor lists only support removing from the end') + + if opts.element_dtype is None: + raise ValueError('cannot pop from a list without knowing its element ' + 'type; use set_element_type to annotate it') + if opts.element_shape is None: + raise ValueError('cannot pop from a list without knowing its element ' + 'shape; use set_element_type to annotate it') + list_out, x = list_ops.tensor_list_pop_back( + list_, element_dtype=opts.element_dtype) + x.set_shape(opts.element_shape) + return list_out, x + + +def _py_list_pop(list_, i): + """Overload of list_pop that executes a Python list append.""" + if i is None: + x = list_.pop() + else: + x = list_.pop(i) + return list_, x + + +# TODO(mdan): Look into reducing duplication between all these containers. +class ListStackOpts( + collections.namedtuple('ListStackOpts', + ('element_dtype', 'original_call'))): + pass + + +def list_stack(list_, opts): + """The list stack function. + + This does not have a direct correspondent in Python. The closest idiom to + this is tf.append or np.stack. It's different from those in the sense that it + accepts a Tensor list, rather than a list of tensors. It can also accept + TensorArray. When the target is anything else, the dispatcher will rely on + ctx.original_call for fallback. + + Args: + list_: An entity that supports append semantics. + opts: A ListStackOpts object. 
+ + Returns: + The output of the stack operation, typically a Tensor. + """ + assert isinstance(opts, ListStackOpts) + + if isinstance(list_, tensor_array_ops.TensorArray): + return _tf_tensorarray_stack(list_) + elif tensor_util.is_tf_type(list_): + if list_.dtype == dtypes.variant: + return _tf_tensor_list_stack(list_, opts) + else: + # No-op for primitive Tensor arguments. + return list_ + else: + return _py_list_stack(list_, opts) + + +def _tf_tensorarray_stack(list_): + """Overload of list_stack that stages a TensorArray stack.""" + return list_.stack() + + +def _tf_tensor_list_stack(list_, opts): + """Overload of list_stack that stages a Tensor list write.""" + if opts.element_dtype is None: + raise ValueError('cannot stack a list without knowing its element type;' + ' use set_element_type to annotate it') + return list_ops.tensor_list_stack(list_, element_dtype=opts.element_dtype) + + +def _py_list_stack(list_, opts): + """Overload of list_stack that executes a Python list append.""" + # Revert to the original call. + return opts.original_call(list_) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/exceptions.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..763a6da90127159cb907cbe361daf4c5ab35ab32 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/exceptions.py @@ -0,0 +1,82 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Exception handling statements: assert, etc.""" + +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.util import tf_inspect + + +def assert_stmt(expression1, expression2): + """Functional form of an assert statement. + + This follows the semantics of the Python assert statement, however the + concrete implementations may deviate from it. See the respective + implementation for details. + + In general, the assert statement should not be used for control flow. + Furthermore, it is encouraged that the assertion expressions should not have + side effects. + + Args: + expression1: Any + expression2: Callable[[], Any], returns the expression to include in the + error message when expression1 evaluates to False. When expression1 is + True, the result of expression2 will not be evaluated, however, + expression2 itself may be evaluated in some implementations. + + Returns: + Any, implementation-dependent. + + Raises: + ValueError: if any arguments are illegal. 
+ """ + if not callable(expression2): + raise ValueError('{} must be a callable'.format(expression2)) + args, _, keywords, _ = tf_inspect.getargspec(expression2) + if args or keywords: + raise ValueError('{} may not have any arguments'.format(expression2)) + + if tensor_util.is_tf_type(expression1): + return _tf_assert_stmt(expression1, expression2) + else: + return _py_assert_stmt(expression1, expression2) + + +def _tf_assert_stmt(expression1, expression2): + """Overload of assert_stmt that stages a TF Assert. + + This implementation deviates from Python semantics as follows: + (1) the assertion is verified regardless of the state of __debug__ + (2) on assertion failure, the graph execution will fail with + tensorflow.errors.ValueError, rather than AssertionError. + + Args: + expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar + expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]] + + Returns: + tensorflow.Operation + """ + expression2_tensors = expression2() + if not isinstance(expression2_tensors, list): + expression2_tensors = [expression2_tensors] + return control_flow_assert.Assert(expression1, expression2_tensors) + + +def _py_assert_stmt(expression1, expression2): + """Overload of assert_stmt that executes a Python assert statement.""" + assert expression1, expression2() + return None diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/logical.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/logical.py new file mode 100644 index 0000000000000000000000000000000000000000..73608807223ef093b28de839fb8a6db6b5f4d47a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/logical.py @@ -0,0 +1,96 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Logical boolean operators: not, and, or.""" + +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import cond as tf_cond +from tensorflow.python.ops import gen_math_ops + + +def not_(a): + """Functional form of "not".""" + if tensor_util.is_tf_type(a): + return _tf_not(a) + return _py_not(a) + + +def _tf_not(a): + """Implementation of the "not_" operator for TensorFlow.""" + return gen_math_ops.logical_not(a) + + +def _py_not(a): + """Default Python implementation of the "not_" operator.""" + return not a + + +def and_(a, b): + """Functional form of "and". Uses lazy evaluation semantics.""" + a_val = a() + if tensor_util.is_tf_type(a_val): + return _tf_lazy_and(a_val, b) + return _py_lazy_and(a_val, b) + + +def _tf_lazy_and(cond, b): + """Lazy-eval equivalent of "and" for Tensors.""" + # TODO(mdan): Enforce cond is scalar here? + return tf_cond.cond(cond, b, lambda: cond) + + +def _py_lazy_and(cond, b): + """Lazy-eval equivalent of "and" in Python.""" + return cond and b() + + +def or_(a, b): + """Functional form of "or". Uses lazy evaluation semantics.""" + a_val = a() + if tensor_util.is_tf_type(a_val): + return _tf_lazy_or(a_val, b) + return _py_lazy_or(a_val, b) + + +def _tf_lazy_or(cond, b): + """Lazy-eval equivalent of "or" for Tensors.""" + # TODO(mdan): Enforce cond is scalar here? 
+ return tf_cond.cond(cond, lambda: cond, b) + + +def _py_lazy_or(cond, b): + """Lazy-eval equivalent of "or" in Python.""" + return cond or b() + + +def eq(a, b): + """Functional form of "equal".""" + if tensor_util.is_tf_type(a) or tensor_util.is_tf_type(b): + return _tf_equal(a, b) + return _py_equal(a, b) + + +def _tf_equal(a, b): + """Overload of "equal" for Tensors.""" + return gen_math_ops.equal(a, b) + + +def _py_equal(a, b): + """Overload of "equal" that falls back to Python's default implementation.""" + return a == b + + +def not_eq(a, b): + """Functional form of "not-equal".""" + return not_(eq(a, b)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/py_builtins.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/py_builtins.py new file mode 100644 index 0000000000000000000000000000000000000000..96ee2d53a92bc5c937dcca9c309740ecb1f5068c --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/py_builtins.py @@ -0,0 +1,533 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Operators corresponding to Python builtin functions. 
+ +List of built-in functions: https://docs.python.org/3/library/functions.html +""" + +import inspect + +from tensorflow.python.autograph.utils import tensors +from tensorflow.python.autograph.utils import type_registry +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import cond +from tensorflow.python.ops import control_flow_assert +from tensorflow.python.ops import gen_parsing_ops +from tensorflow.python.ops import gen_string_ops +from tensorflow.python.ops import list_ops +from tensorflow.python.ops import math_ops + + +UNSPECIFIED = object() + +abs_registry = type_registry.TypeRegistry() +len_registry = type_registry.TypeRegistry() +print_registry = type_registry.TypeRegistry() +enumerate_registry = type_registry.TypeRegistry() +zip_registry = type_registry.TypeRegistry() +map_registry = type_registry.TypeRegistry() +filter_registry = type_registry.TypeRegistry() +any_registry = type_registry.TypeRegistry() +all_registry = type_registry.TypeRegistry() +sorted_registry = type_registry.TypeRegistry() +next_registry = type_registry.TypeRegistry() + + +def registry_lookup(reg, obj): + try: + return reg.lookup(obj) + except LookupError: + pass + return None + + +def overload_of(f): + if f in SUPPORTED_BUILTINS: + return BUILTIN_FUNCTIONS_MAP[f.__name__] + return f + + +def _find_originating_frame(caller_fn_scope, innermost=True): + """Locates the frame in which `caller_fn_scope` was defined.""" + ctx_frame = inspect.currentframe() + result = None + while ctx_frame is not None: + # Note it should not be normally possible to get false positives this way + # because the function scope object is not accessible to user code (barring + # call stack introspection). 
+ if ctx_frame.f_locals.get(caller_fn_scope.name, None) is caller_fn_scope: + result = ctx_frame + if innermost: + break + ctx_frame = ctx_frame.f_back + + assert result is not None, ( + 'the conversion process should ensure the caller_fn_scope is always' + ' found somewhere on the call stack') + + return result + + +def locals_in_original_context(caller_fn_scope): + """Executes the locals function in the context of a specified function.""" + return _find_originating_frame(caller_fn_scope, innermost=True).f_locals + + +def globals_in_original_context(caller_fn_scope): + """Executes the locals function in the context of a specified function.""" + return _find_originating_frame(caller_fn_scope, innermost=True).f_globals + + +def eval_in_original_context(f, args, caller_fn_scope): + """Executes the eval function in the context of a specified function.""" + # When control flow is rewritten using functions, eval should use the + # variables found in the same block where it was called. That is equivalent + # to the innermost function call. + ctx_frame = _find_originating_frame(caller_fn_scope, innermost=True) + + args = ( + args[0], + ctx_frame.f_globals if len(args) < 2 else args[1], + ctx_frame.f_locals if len(args) < 3 else args[2], + ) + return f(*args) + + +def super_in_original_context(f, args, caller_fn_scope): + """Executes the super function in the context of a specified function. + + See https://docs.python.org/3/library/functions.html#super for the exact + details + + Args: + f: Callable, typically the super builtin + args: List[Any], the original call arguments + caller_fn_scope: Optional[function_wrappers.FunctionScope], the function + scope of the converted function in which this call was originally made + + Returns: + The result of calling `f` as if it was called in the frame indicated by + `caller_fn_scope`. + """ + + # Only the no-arg call is desugared. 
+ if args: + return f(*args) + + # Inner functions seem to include their closure in f_locals, so we need + # to find the outermost frame. + ctx_frame = _find_originating_frame(caller_fn_scope, innermost=False) + + # When super(..) is called without arguments, it looks for __class__ cell + # variable and the first argument passed in the enclosing function according + # to the spec https://www.python.org/dev/peps/pep-3135/ . + # + # We couldn't verify if `inspect.currentframe().f_code.co_varnames[0]` is + # guaranteed to be the first argument from an official doc or PEP, however, + # it's fairly stable and well established: + # - An unofficial community doc mentions it. + # https://python-reference.readthedocs.io/en/latest/docs/code/varnames.html + # - CPython has tests checking that order, which was merged in 2008, and + # unchanged since then. + # https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py2_test_grammar.py#L157 + # https://github.com/python/cpython/blame/2f224a077a83ac9de8a12bb7dcc516642b8176d8/Lib/lib2to3/tests/data/py3_test_grammar.py#L192 + # + # Note: the name can be more reliably obtained by inspecting the calling + # function's argspec. + # + # Even though methods can be declared using *args (def method(*args)), + # that pattern is disallowed by super() -- it raises super() no arguments. + # Method definitions using **kwargs are not allowed at all. + # In other words, we can always assume that self is on the first positional + # argument (for correct code). + # + # TODO(mdan): Consider additional checks in case the input code is incorrect. + # For example, the error might be cryptic compared to what super() regularly + # raises. 
+ + type_arg = ctx_frame.f_locals['__class__'] + self_arg_name = ctx_frame.f_code.co_varnames[0] + self_arg = ctx_frame.f_locals[self_arg_name] + return f(type_arg, self_arg) + + +def abs_(x): + abs_override = registry_lookup(abs_registry, x) + if abs_override is not None: + return abs_override(x) + if tensor_util.is_tf_type(x): + return _tf_abs(x) + return _py_abs(x) + + +def _tf_abs(x): + return math_ops.abs(x) + + +def _py_abs(x): + return abs(x) + + +def float_(x=0): + if tensor_util.is_tf_type(x): + return _tf_float(x) + return _py_float(x) + + +def _tf_float(x): + # TODO(mdan): We shouldn't assume float32. + if x.dtype == dtypes.string: + return gen_parsing_ops.string_to_number(x, out_type=dtypes.float32) + return math_ops.cast(x, dtype=dtypes.float32) + + +def _py_float(x): + return float(x) + + +def int_(x=0, base=UNSPECIFIED): + if tensor_util.is_tf_type(x): + return _tf_int(x, base) + return _py_int(x, base) + + +def _tf_int(x, base): + if base not in (10, UNSPECIFIED): + raise NotImplementedError('base {} not supported for int'.format(base)) + + # TODO(mdan): We shouldn't assume int32. + if x.dtype == dtypes.string: + return gen_parsing_ops.string_to_number(x, out_type=dtypes.int32) + return math_ops.cast(x, dtype=dtypes.int32) + + +def _py_int(x, base): + if base is UNSPECIFIED: + return int(x) + return int(x, base) + + +def len_(s): + len_override = registry_lookup(len_registry, s) + if len_override is not None: + return len_override(s) + if tensors.is_tensor_array(s): + return _tf_tensor_array_len(s) + elif tensors.is_tensor_list(s): + return _tf_tensor_list_len(s) + elif tensor_util.is_tf_type(s): + return _tf_tensor_len(s) + return _py_len(s) + + +def _tf_tensor_array_len(s): + return s.size() + + +def _tf_tensor_list_len(s): + return list_ops.tensor_list_length(s) + + +def _tf_tensor_len(s): + """Overload of len_ for Tensor arguments.""" + # Statically shaped tensors: length is known ahead of time. 
+ if s.shape.ndims and s.shape.dims[0].value is not None: + return s.shape.dims[0].value + + # Static shape of unknown dimensions: use dynamic shape but statically + # check that it's a scalar. + shape = array_ops.shape(s) + + assert shape.shape, 'shape tensor of zero size? {}'.format(shape) + + if shape.shape[0] == 0: + raise ValueError( + 'len requires a non-scalar tensor, got one of shape {}'.format(shape)) + + if shape.shape.dims[0].value is not None: + return array_ops.shape(s)[0] + + # Fully dynamic shape: use ops. + rank = array_ops.rank(s) + + def raise_zero_rank_error(): + msg = gen_string_ops.string_join( + ['len requires non-zero rank, got ', + gen_string_ops.as_string(rank)]) + with ops.control_dependencies([control_flow_assert.Assert(False, [msg])]): + return constant_op.constant(0, dtype=dtypes.int32) + + return cond.cond(rank > 0, lambda: array_ops.shape(s)[0], + raise_zero_rank_error) + + +def _py_len(s): + return len(s) + + +def print_(*objects, **kwargs): + """Overload of the print builtin.""" + # Note: Python 2.6 doesn't support explicit keywords after starargs. + unknown_kwargs = tuple( + set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush'))) + if unknown_kwargs: + raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs)) + + print_fn = _py_print + for x in objects: + print_override = registry_lookup(print_registry, x) + if print_override is not None: # pylint: disable=comparison-with-callable + print_fn = print_override + break + + if print_fn is _py_print: + # If this fails, ops/autograph_ops.py hasn't been imported. 
+ assert not any(tensor_util.is_tf_type(s) for s in objects) + + return print_fn(*objects, **kwargs) + + +def _py_print(*objects, **kwargs): + print(*objects, **kwargs) + + +def min_(*args, **kwargs): + if any(tensor_util.is_tf_type(s) for s in args): + return _tf_min(*args, **kwargs) + return _py_min(*args, **kwargs) + + +def _tf_min(*args, **kwargs): + if len(kwargs): + kwargs_tuple = tuple(set(kwargs.keys())) + raise ValueError('These keyword arguments are ' + 'currently not supported: {}'.format(kwargs_tuple)) + if len(args) == 1: + rank = args[0].shape.rank + if rank == 0: + return args[0] + if rank == 1: + return math_ops.reduce_min(*args, axis=0) + raise ValueError('min(arg) currently support only tensor with rank 1, ' + 'but got a tensor with rank {}'.format(rank)) + for arg in args: + rank = arg.shape.rank + if rank != 0: + raise ValueError('min(arg1, arg2, *args) currently support ' + 'only scalar tensor, but got a tensor ' + 'with shape {}'.format(rank)) + return math_ops.reduce_min(args, axis=0) + + +def _py_min(*args, **kwargs): + return min(*args, **kwargs) + + +def max_(*args, **kwargs): + if any(tensor_util.is_tf_type(s) for s in args): + return _tf_max(*args, **kwargs) + return _py_max(*args, **kwargs) + + +def _tf_max(*args, **kwargs): + if len(kwargs): + kwargs_tuple = tuple(set(kwargs.keys())) + raise ValueError('These keyword arguments are ' + 'currently not supported: {}'.format(kwargs_tuple)) + if len(args) == 1: + rank = args[0].shape.rank + if rank == 0: + return args[0] + if rank == 1: + return math_ops.reduce_max(*args, axis=0) + raise ValueError('max(arg) currently support only tensor with rank 1, ' + 'but got a tensor with rank {}'.format(rank)) + for arg in args: + rank = arg.shape.rank + if rank != 0: + raise ValueError('max(arg1, arg2, *args) currently support ' + 'only scalar tensor, but got a tensor ' + 'with shape {}'.format(rank)) + return math_ops.reduce_max(args, axis=0) + + +def _py_max(*args, **kwargs): + return max(*args, 
**kwargs) + + +def range_(start_or_stop, stop=UNSPECIFIED, step=UNSPECIFIED): + if any(tensor_util.is_tf_type(s) for s in (start_or_stop, stop, step)): + return _tf_range(start_or_stop, stop, step) + return _py_range(start_or_stop, stop, step) + + +def _tf_range(start_or_stop, stop, step): + """Overload of range_ that generates a TF range tensor.""" + # Note: for static inputs (e.g. constants), tf.range errors out at graph + # construction time, instead of returning an empty tensor. Preventing the + # graph construction error aligns the semantics with Python. + + # TODO(mdan): We should optimize this when a full tensor is not required. + if step is not UNSPECIFIED: + # TODO(mdan): Add argument coercion similar to other cases. + return math_ops.range(start_or_stop, stop, step) + if stop is not UNSPECIFIED: + stop = math_ops.maximum(start_or_stop, stop) + return math_ops.range(start_or_stop, stop) + start_or_stop = math_ops.maximum(start_or_stop, 0) + return math_ops.range(start_or_stop) + + +def _py_range(start_or_stop, stop, step): + if step is not UNSPECIFIED: + return range(start_or_stop, stop, step) + if stop is not UNSPECIFIED: + return range(start_or_stop, stop) + return range(start_or_stop) + + +def enumerate_(s, start=0): + enumerate_override = registry_lookup(enumerate_registry, s) + if enumerate_override is not None: + return enumerate_override(s, start) + return _py_enumerate(s, start) + + +def _py_enumerate(s, start=0): + return enumerate(s, start) + + +def zip_(*iterables, strict=False): + zip_fn = _py_zip + # If the overridden function is not the same across all iterables, use _py_zip + for x in iterables: + zip_override = registry_lookup(zip_registry, x) + if zip_override is None or (zip_fn != _py_zip and zip_override != zip_fn): # pylint: disable=comparison-with-callable + zip_fn = _py_zip + break + zip_fn = zip_override + return zip_fn(*iterables, strict=strict) + + +def _py_zip(*iterables, strict=False): + if strict: + return zip(*iterables, 
strict=True) + else: + # Python < 3.10 doesn't have `strict` kwarg. + return zip(*iterables) + + +def map_(fn, *iterables): + map_fn = _py_map + # If the overridden function is not the same across all iterables, use _py_map + for x in iterables: + map_override = registry_lookup(map_registry, x) + if map_override is None or (map_fn != _py_map and map_override != map_fn): # pylint: disable=comparison-with-callable + map_fn = _py_map + break + map_fn = map_override + return map_fn(fn, *iterables) + + +def _py_map(fn, *iterables): + return map(fn, *iterables) + + +def next_(iterator, default=UNSPECIFIED): + next_override = registry_lookup(next_registry, iterator) + if next_override is not None: + return next_override(iterator, default) + return next_py(iterator, default) + + +def next_py(iterator, default=UNSPECIFIED): + if default is UNSPECIFIED: + return next(iterator) + return next(iterator, default) + + +def filter_(function, iterable): + filter_override = registry_lookup(filter_registry, iterable) + if filter_override is not None: + return filter_override(function, iterable) + return _py_filter(function, iterable) + + +def _py_filter(function, iterable): + return filter(function, iterable) + + +def any_(iterable): + any_override = registry_lookup(any_registry, iterable) + if any_override is not None: + return any_override(iterable) + return _py_any(iterable) + + +def _py_any(iterable): + return any(iterable) + + +def all_(iterable): + all_override = registry_lookup(all_registry, iterable) + if all_override is not None: + return all_override(iterable) + return _py_all(iterable) + + +def _py_all(iterable): + return all(iterable) + + +def sorted_(iterable, key=UNSPECIFIED, reverse=UNSPECIFIED): + sorted_override = registry_lookup(sorted_registry, iterable) + if sorted_override is not None: + return sorted_override(iterable, key, reverse) + return _py_sorted(iterable, key, reverse) + + +def _py_sorted(iterable, key, reverse): + if key is not UNSPECIFIED and reverse is 
UNSPECIFIED: + return sorted(iterable, key=key) + if key is UNSPECIFIED and reverse is not UNSPECIFIED: + return sorted(iterable, reverse=reverse) + if key is not UNSPECIFIED and reverse is not UNSPECIFIED: + return sorted(iterable, key=key, reverse=reverse) + return sorted(iterable) + + +SUPPORTED_BUILTINS = (abs, float, int, len, print, range, enumerate, zip, map, + filter, any, all, sorted) + +BUILTIN_FUNCTIONS_MAP = { + 'abs': abs_, + 'any': any_, + 'all': all_, + 'enumerate': enumerate_, + 'filter': filter_, + 'float': float_, + 'int': int_, + 'len': len_, + 'map': map_, + 'next': next_, + 'print': print_, + 'range': range_, + 'sorted': sorted_, + 'zip': zip_, +} diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/slices.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/slices.py new file mode 100644 index 0000000000000000000000000000000000000000..fde5afdc6037c0668973fcb80a0d18ef44612009 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/slices.py @@ -0,0 +1,142 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Operators specific to slicing operations.""" + +import collections + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import gen_array_ops +from tensorflow.python.ops import gen_string_ops +from tensorflow.python.ops import list_ops +from tensorflow.python.ops import tensor_array_ops + + +# TODO(mdan): Support extended slices. + + +class GetItemOpts(collections.namedtuple('GetItemOpts', ('element_dtype',))): + pass + + +def get_item(target, i, opts): + """The slice read operator (i.e. __getitem__). + + Note: it is unspecified whether target will be mutated or not. In general, + if target is mutable (like Python lists), it will be mutated. + + Args: + target: An entity that supports getitem semantics. + i: Index to read from. + opts: A GetItemOpts object. + + Returns: + The read element. + + Raises: + ValueError: if target is not of a supported type. 
+ """ + assert isinstance(opts, GetItemOpts) + + if isinstance(target, tensor_array_ops.TensorArray): + return _tf_tensorarray_get_item(target, i) + elif tensor_util.is_tf_type(target): + if target.dtype == dtypes.variant: + return _tf_tensor_list_get_item(target, i, opts) + elif target.dtype == dtypes.string and target.shape.ndims == 0: + return _tf_tensor_string_get_item(target, i) + else: + return _tf_tensor_get_item(target, i) + else: + return _py_get_item(target, i) + + +def _tf_tensorarray_get_item(target, i): + """Overload of get_item that stages a TensorArray read.""" + return target.read(i) + + +def _tf_tensor_list_get_item(target, i, opts): + """Overload of get_item that stages a Tensor list read.""" + if opts.element_dtype is None: + raise ValueError('cannot retrieve from a list without knowing its ' + 'element type; use set_element_type to annotate it') + x = list_ops.tensor_list_get_item(target, i, element_dtype=opts.element_dtype) + return x + + +def _tf_tensor_get_item(target, i): + """Overload of get_item that stages a Tensor (not Tensor list) read.""" + return target[i] + + +def _tf_tensor_string_get_item(target, i): + """Overload of get_item that stages a Tensor string read.""" + x = gen_string_ops.substr(target, i, 1) + return x + + +def _py_get_item(target, i): + """Overload of get_item that executes a Python list modification.""" + return target[i] + + +def set_item(target, i, x): + """The slice write operator (i.e. __setitem__). + + Note: it is unspecified whether target will be mutated or not. In general, + if target is mutable (like Python lists), it will be mutated. + + Args: + target: An entity that supports setitem semantics. + i: Index to modify. + x: The new element value. + + Returns: + Same as target, after the update was performed. + + Raises: + ValueError: if target is not of a supported type. 
+ """ + if isinstance(target, tensor_array_ops.TensorArray): + return _tf_tensorarray_set_item(target, i, x) + elif tensor_util.is_tf_type(target): + if target.dtype == dtypes.variant: + return _tf_tensor_list_set_item(target, i, x) + else: + return _tf_tensor_set_item(target, i, x) + else: + return _py_set_item(target, i, x) + + +def _tf_tensorarray_set_item(target, i, x): + """Overload of set_item that stages a TensorArray write.""" + return target.write(i, x) + + +def _tf_tensor_list_set_item(target, i, x): + """Overload of set_item that stages a Tensor list update.""" + return list_ops.tensor_list_set_item(target, i, x) + + +def _tf_tensor_set_item(target, i, x): + """Overload of set_item that stages a Tensor scatter update.""" + return gen_array_ops.tensor_scatter_update(target, ((i,),), (x,)) + + +def _py_set_item(target, i, x): + """Overload of set_item that executes a Python list modification.""" + target[i] = x + return target diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/variables.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/variables.py new file mode 100644 index 0000000000000000000000000000000000000000..115c44701741e9c92a52f3fd3f6f2e59a3bc7ec7 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/operators/variables.py @@ -0,0 +1,104 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities used to capture Python idioms.""" + + +def ld(v): + """Load variable operator.""" + if isinstance(v, Undefined): + return v.read() + return v + + +def ldu(load_v, name): + """Load variable operator that returns Undefined when failing to evaluate. + + Note: the name ("load or return undefined") is abbreviated to minimize + the amount of clutter in generated code. + + This variant of `ld` is useful when loading symbols that may be undefined at + runtime, such as composite symbols, and whether they are defined or not cannot + be determined statically. For example `d['a']` is undefined when `d` is an + empty dict. + + Args: + load_v: Lambda that executes the actual read. + name: Human-readable name of the symbol being read. + Returns: + Either the value of the symbol, or Undefined, if the symbol is not fully + defined. + """ + try: + # TODO(mdan): Use locals()/globals() here. + return load_v() + except (KeyError, AttributeError, NameError): + return Undefined(name) + + +class Undefined(object): + """Represents an undefined symbol in Python. + + This is used to reify undefined symbols, which is required to use the + functional form of loops. + Example: + + while n > 0: + n = n - 1 + s = n + return s # Runtime error if n == 0 + + This is valid Python code and will not result in an error as long as n + is positive. The use of this class is to stay as close to Python semantics + as possible for staged code of this nature. 
+ + Converted version of the above showing the possible usage of this class: + + s = Undefined('s') + init_state = (s,) + s = while_loop(cond, body, init_state) + return s # s is an instance of Undefined if the loop never runs + + Attributes: + symbol_name: Text, identifier for the undefined symbol + """ + + __slots__ = ('symbol_name',) + + def __init__(self, symbol_name): + self.symbol_name = symbol_name + + def read(self): + raise UnboundLocalError("'{}' is used before assignment".format( + self.symbol_name)) + + def __repr__(self): + return self.symbol_name + + def __getattribute__(self, name): + try: + # If it's an existing attribute, return it. + return object.__getattribute__(self, name) + except AttributeError: + # Otherwise return Undefined. + return self + + def __getitem__(self, i): + return self + + +# TODO(mdan): Refactor as a RetVal object, aggregating the value and do_return. +class UndefinedReturnValue(object): + """Represents a return value that is undefined.""" + pass diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e35229464f75b8c33887697435ef60ed05066f1a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility module that contains APIs usable in the generated code.""" + +from tensorflow.python.autograph.utils.context_managers import control_dependency_on_returns +from tensorflow.python.autograph.utils.misc import alias_tensors +from tensorflow.python.autograph.utils.tensor_list import dynamic_list_append diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/__init__.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30a2fd630a6642165d3fd121a9f5fd400fb1b307 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/ag_logging.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/ag_logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f7b6f8980a2302608292658be48ed8bcf14e084 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/ag_logging.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/context_managers.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/context_managers.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7ca80ef66f0d95dd93dc1aa0bb5b81a08aef123a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/context_managers.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/misc.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85de7891b49ab7a290dc76f4cb6f83e9f8fbfd21 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/misc.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensor_list.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensor_list.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0871f117d1a74d89a5b47c2d2b8d725eafc351a9 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensor_list.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensors.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22bb9c05153184df5eeece9cc34952236f3c8e08 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/tensors.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/testing.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17f16fb2cef6045b6aa83505bf340e31344c5432 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/testing.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/type_registry.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/type_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a010f888604693075f501545773600273fe8712 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/__pycache__/type_registry.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/ag_logging.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/ag_logging.py new file mode 100644 index 0000000000000000000000000000000000000000..dd26223d1e9d79a62aed61ff047e5da0b3328e57 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/ag_logging.py @@ -0,0 +1,145 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Logging and debugging utilities.""" + +import os +import sys +import traceback + +# TODO(mdan): Use a custom logger class. +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util.tf_export import tf_export + +VERBOSITY_VAR_NAME = 'AUTOGRAPH_VERBOSITY' +DEFAULT_VERBOSITY = 0 + +verbosity_level = None # vlog-like. Takes precedence over the env variable. +echo_log_to_stdout = False + +# In interactive Python, logging echo is enabled by default. +if hasattr(sys, 'ps1') or hasattr(sys, 'ps2'): + echo_log_to_stdout = True + + +@tf_export('autograph.set_verbosity') +def set_verbosity(level, alsologtostdout=False): + """Sets the AutoGraph verbosity level. + + _Debug logging in AutoGraph_ + + More verbose logging is useful to enable when filing bug reports or doing + more in-depth debugging. + + There are two means to control the logging verbosity: + + * The `set_verbosity` function + + * The `AUTOGRAPH_VERBOSITY` environment variable + + `set_verbosity` takes precedence over the environment variable. + + For example: + + ```python + import os + import tensorflow as tf + + os.environ['AUTOGRAPH_VERBOSITY'] = '5' + # Verbosity is now 5 + + tf.autograph.set_verbosity(0) + # Verbosity is now 0 + + os.environ['AUTOGRAPH_VERBOSITY'] = '1' + # No effect, because set_verbosity was already called. + ``` + + Logs entries are output to [absl](https://abseil.io)'s + [default output](https://abseil.io/docs/python/guides/logging), + with `INFO` level. 
+ Logs can be mirrored to stdout by using the `alsologtostdout` argument. + Mirroring is enabled by default when Python runs in interactive mode. + + Args: + level: int, the verbosity level; larger values specify increased verbosity; + 0 means no logging. When reporting bugs, it is recommended to set this + value to a larger number, like 10. + alsologtostdout: bool, whether to also output log messages to `sys.stdout`. + """ + global verbosity_level + global echo_log_to_stdout + verbosity_level = level + echo_log_to_stdout = alsologtostdout + + +@tf_export('autograph.trace') +def trace(*args): + """Traces argument information at compilation time. + + `trace` is useful when debugging, and it always executes during the tracing + phase, that is, when the TF graph is constructed. + + _Example usage_ + + ```python + import tensorflow as tf + + for i in tf.range(10): + tf.autograph.trace(i) + # Output: + ``` + + Args: + *args: Arguments to print to `sys.stdout`. + """ + print(*args) + + +def get_verbosity(): + global verbosity_level + if verbosity_level is not None: + return verbosity_level + return int(os.getenv(VERBOSITY_VAR_NAME, DEFAULT_VERBOSITY)) + + +def has_verbosity(level): + return get_verbosity() >= level + + +def _output_to_stdout(msg, *args, **kwargs): + print(msg % args) + if kwargs.get('exc_info', False): + traceback.print_exc() + + +def error(level, msg, *args, **kwargs): + if has_verbosity(level): + logging.error(msg, *args, **kwargs) + if echo_log_to_stdout: + _output_to_stdout('ERROR: ' + msg, *args, **kwargs) + + +def log(level, msg, *args, **kwargs): + if has_verbosity(level): + logging.info(msg, *args, **kwargs) + if echo_log_to_stdout: + _output_to_stdout(msg, *args, **kwargs) + + +def warning(msg, *args, **kwargs): + logging.warning(msg, *args, **kwargs) + if echo_log_to_stdout: + _output_to_stdout('WARNING: ' + msg, *args, **kwargs) + sys.stdout.flush() diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/context_managers.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/context_managers.py new file mode 100644 index 0000000000000000000000000000000000000000..ada33aed15842a19ab822f240b53dbbbd89ef6db --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/context_managers.py @@ -0,0 +1,45 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Various context managers.""" + +import contextlib + +from tensorflow.python.framework import ops +from tensorflow.python.ops import tensor_array_ops + + +def control_dependency_on_returns(return_value): + """Create a TF control dependency on the return values of a function. + + If the function had no return value, a no-op context is returned. + + Args: + return_value: The return value to set as control dependency. + + Returns: + A context manager. + """ + def control_dependency_handle(t): + if isinstance(t, tensor_array_ops.TensorArray): + return t.flow + return t + + if return_value is None: + return contextlib.contextmanager(lambda: (yield))() + # TODO(mdan): Filter to tensor objects. 
+ if not isinstance(return_value, (list, tuple)): + return_value = (return_value,) + return_value = tuple(control_dependency_handle(t) for t in return_value) + return ops.control_dependencies(return_value) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/misc.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..d14b4758aba03f4300a6109a8da21681ce75292b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/misc.py @@ -0,0 +1,59 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Miscellaneous utilities that don't fit anywhere else.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import math_ops + + +def alias_tensors(*args): + """Wraps any Tensor arguments with an identity op. + + Any other argument, including Variables, is returned unchanged. + + Args: + *args: Any arguments. Must contain at least one element. + + Returns: + Same as *args, with Tensor instances replaced as described. 
+ + Raises: + ValueError: If args doesn't meet the requirements. + """ + + def alias_if_tensor(a): + return array_ops.identity(a) if isinstance(a, tensor.Tensor) else a + + # TODO(mdan): Recurse into containers? + # TODO(mdan): Anything we can do about variables? Fake a scope reuse? + if len(args) > 1: + return (alias_if_tensor(a) for a in args) + elif len(args) == 1: + return alias_if_tensor(args[0]) + + raise ValueError('at least one argument required') + + +def get_range_len(start, limit, delta): + dist = ops.convert_to_tensor(limit - start) + unadjusted_len = dist // delta + adjustment = math_ops.cast( + gen_math_ops.not_equal(dist % delta, + array_ops.zeros_like(unadjusted_len)), dist.dtype) + final_len = unadjusted_len + adjustment + return gen_math_ops.maximum(final_len, array_ops.zeros_like(final_len)) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensor_list.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensor_list.py new file mode 100644 index 0000000000000000000000000000000000000000..c8bdf3ae982982d33c6dd3c901dbeb2a67cb1ba2 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensor_list.py @@ -0,0 +1,64 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A typed list in Python.""" + +from tensorflow.python.framework import tensor +from tensorflow.python.ops import list_ops +from tensorflow.python.ops import tensor_array_ops + + +def dynamic_list_append(target, element): + """Converts a list append call inline.""" + if isinstance(target, tensor_array_ops.TensorArray): + return target.write(target.size(), element) + # TODO(mdan): What's the right way to check this? + # TODO(mdan): We may not need this branch. + # It may be possible to use TensorList alone if the loop body will not + # require wrapping it, although we'd have to think about an autoboxing + # mechanism for lists received as parameter. + if isinstance(target, tensor.Tensor): + return list_ops.tensor_list_push_back(target, element) + + # Python targets (including TensorList): fallback to their original append. + target.append(element) + return target + + +class TensorList(object): + """Tensor list wrapper API-compatible with Python built-in list.""" + + def __init__(self, shape, dtype): + self.dtype = dtype + self.shape = shape + self.clear() + + def append(self, value): + self.list_ = list_ops.tensor_list_push_back(self.list_, value) + + def pop(self): + self.list_, value = list_ops.tensor_list_pop_back(self.list_, self.dtype) + return value + + def clear(self): + self.list_ = list_ops.empty_tensor_list(self.shape, self.dtype) + + def count(self): + return list_ops.tensor_list_length(self.list_) + + def __getitem__(self, key): + return list_ops.tensor_list_get_item(self.list_, key, self.dtype) + + def __setitem__(self, key, value): + self.list_ = list_ops.tensor_list_set_item(self.list_, key, value) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensors.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensors.py new file mode 100644 index 
0000000000000000000000000000000000000000..0868977844783cf605ce7a7e2dff0988257d5f6b --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/tensors.py @@ -0,0 +1,49 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""This module defines tensor utilities not found in TensorFlow. + +The reason these utilities are not defined in TensorFlow is because they may +not be not fully robust, although they work in the vast majority of cases. So +we define them here in order for their behavior to be consistently verified. +""" + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import tensor_array_ops + + +def is_dense_tensor(t): + # TODO(mdan): Resolve this inconsistency. + return (tensor_util.is_tf_type(t) and + not isinstance(t, sparse_tensor.SparseTensor)) + + +def is_tensor_array(t): + return isinstance(t, tensor_array_ops.TensorArray) + + +def is_tensor_list(t): + # TODO(mdan): This is just a heuristic. + # With TF lacking support for templated types, this is unfortunately the + # closest we can get right now. A dedicated op ought to be possible to + # construct. 
+ return (tensor_util.is_tf_type(t) and t.dtype == dtypes.variant and + not t.shape.ndims) + + +def is_range_tensor(t): + """Returns True if a tensor is the result of a tf.range op. Best effort.""" + return tensor_util.is_tf_type(t) and hasattr(t, 'op') and t.op.type == 'Range' diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/testing.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..2c624ee653c52fb34b0c06766ac015644a5651d4 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/testing.py @@ -0,0 +1,168 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Testing utilities.""" + +import re +import sys +import types +import unittest + +from tensorflow.python.eager import def_function +from tensorflow.python.framework import op_callbacks +from tensorflow.python.framework import ops +from tensorflow.python.ops import variables +from tensorflow.python.platform import test + + +class AutoGraphTestCase(test.TestCase): + """Tests specialized for AutoGraph, which run as tf.functions. 
+ + These tests use a staged programming-like approach: most of the test code runs + as-is inside a tf.function, but the assertions are lifted outside the + function, and run with the corresponding function values instead. + + For example, the test: + + def test_foo(self): + baz = bar(); + self.assertEqual(baz, value) + + is equivalent to writing: + + def test_foo(self): + @tf.function + def test_fn(): + baz = bar(); + return baz, value + + baz_actual, value_actual = test_fn() + self.assertEqual(baz_actual, value_actual) + + Only assertions that require evaluation outside the function are lifted + outside the function scope. The rest execute inline, at function creation + time. + """ + + def __new__(cls, *args): + obj = super().__new__(cls) + + for name in cls.__dict__: + if not name.startswith(unittest.TestLoader.testMethodPrefix): + continue + m = getattr(obj, name) + if callable(m): + wrapper = obj._run_as_tf_function(m) + setattr(obj, name, types.MethodType(wrapper, obj)) + + return obj + + def _op_callback( + self, op_type, inputs, attrs, outputs, op_name=None, graph=None): + self.trace_log.append(op_type) + + def _run_as_tf_function(self, fn): + + def wrapper(self): + @def_function.function(autograph=False) # Testing autograph itself. + def fn_wrapper(): + self.assertions = [] + self.raises_cm = None + self.graph_assertions = [] + self.trace_log = [] + fn() + targets = [args for _, args in self.assertions] + return targets + + try: + tensors = fn_wrapper() + + for assertion in self.graph_assertions: + assertion(fn_wrapper.get_concrete_function().graph) + + actuals = self.evaluate(tensors) + + except: # pylint:disable=bare-except + if self.raises_cm is not None: + # Note: Yes, the Raises and function contexts cross. 
+ self.raises_cm.__exit__(*sys.exc_info()) + return + else: + raise + + for (assertion, _), values in zip(self.assertions, actuals): + assertion(*values) + + return wrapper + + def variable(self, name, value, dtype): + with ops.init_scope(): + if name not in self.variables: + self.variables[name] = variables.Variable(value, dtype=dtype) + self.evaluate(self.variables[name].initializer) + return self.variables[name] + + def setUp(self): + super().setUp() + self.variables = {} + self.trace_log = [] + self.raises_cm = None + op_callbacks.add_op_callback(self._op_callback) + + def tearDown(self): + op_callbacks.remove_op_callback(self._op_callback) + self.trace_log = None + self.variables = None + super().tearDown() + + def assertGraphContains(self, op_regex, n): + def assertion(graph): + matches = [] + for node in graph.as_graph_def().node: + if re.match(op_regex, node.name): + matches.append(node) + for fn in graph.as_graph_def().library.function: + for node_def in fn.node_def: + if re.match(op_regex, node_def.name): + matches.append(node_def) + self.assertLen(matches, n) + + self.graph_assertions.append(assertion) + + def assertOpCreated(self, op_type): + self.assertIn(op_type, self.trace_log) + + def assertOpsNotCreated(self, op_types): + self.assertEmpty(set(op_types) & set(self.trace_log)) + + def assertNoOpsCreated(self): + self.assertEmpty(self.trace_log) + + def assertEqual(self, *args): + self.assertions.append((super().assertEqual, list(args))) + + def assertLess(self, *args): + self.assertions.append((super().assertLess, list(args))) + + def assertGreaterEqual(self, *args): + self.assertions.append((super().assertGreaterEqual, list(args))) + + def assertDictEqual(self, *args): + self.assertions.append((super().assertDictEqual, list(args))) + + def assertRaisesRuntime(self, *args): + if self.raises_cm is not None: + raise ValueError('cannot use more than one assertRaisesRuntime in a test') + self.raises_cm = self.assertRaisesRegex(*args) + 
self.raises_cm.__enter__() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/type_registry.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/type_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..dad222928c284c92fbef8a1a4b0cf5e09fc059af --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/autograph/utils/type_registry.py @@ -0,0 +1,62 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Registry mechanism implementing the registry pattern for general use.""" + + +class TypeRegistry(object): + """Provides a type registry for the python registry pattern. + + Contains mappings between types and type specific objects, to implement the + registry pattern. + + Some example uses of this would be to register different functions depending + on the type of object. + """ + + def __init__(self): + self._registry = {} + + def register(self, obj, value): + """Registers a Python object within the registry. + + Args: + obj: The object to add to the registry. + value: The stored value for the 'obj' type. + + Raises: + KeyError: If the same obj is used twice. 
+ """ + if obj in self._registry: + raise KeyError(f"{type(obj)} has already been registered.") + self._registry[obj] = value + + def lookup(self, obj): + """Looks up 'obj'. + + Args: + obj: The object to lookup within the registry. + + Returns: + Value for 'obj' in the registry if found. + Raises: + LookupError: if 'obj' has not been registered. + """ + for registered in self._registry: + if isinstance( + obj, registered + ): + return self._registry[registered] + + raise LookupError(f"{type(obj)} has not been registered.")