diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__init__.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fb3af0846cf5caec0240814edd3c2b4a599fd58 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/example_parser_configuration.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90da8b8abe58e37ea5eca22e74122dbe6048dda1 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/function_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/is_in_graph_mode.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/is_in_graph_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82d0431fe214df7425238cd9ed7cdb1740119b8f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/is_in_graph_mode.cpython-310.pyc differ diff --git 
a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64e58e7756d2c7826e7d37c20d5d0d206edbde9a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keras_deps.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keyword_args.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keyword_args.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e14aae85f4db1231f29342e1e8cc0d652d4ca387 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/keyword_args.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0196388fd617b36453df7de75ae7f3968de733ca Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lazy_loader.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lock_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lock_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7198d0635ab543d21f74a5c28ff75fc6637195c9 Binary files /dev/null and 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/lock_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/module_wrapper.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/module_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ce348e66f10b84d78edbdb09dd091cb85bf1433 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/module_wrapper.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9bc33509335cdbc2950df91dfff82f72656e432 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest_util.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..948e3963489daee66ec07f72075316159965936f Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/nest_util.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/numpy_compat.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/numpy_compat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..24924bb3a1c37645bbf1dd0c657f344c522d1c9d Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/numpy_compat.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/object_identity.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/object_identity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b794e7174de0de85bdb490a26c73550e0a2b0427 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/object_identity.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be50cd45ddd4519fb668fdb8afc2a77e0deb08f3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/serialization.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_contextlib.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_contextlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25eeb4d39e4bf9db8f699e7d94ab517ef568b001 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_contextlib.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator.cpython-310.pyc 
b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d126a091dad8f21e748f9bcda6cdb2d519acc1 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator_export.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d76b81e15383b43a61559b37bcc16c4cd4303cd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_decorator_export.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_export.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_export.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc27a9961982dd25e34dea6f7a9b1440937a46e3 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_export.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c75045faebf2f806b012d7b353ab9762e58f8db Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_inspect.cpython-310.pyc 
differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00963c43b8bff34bf39901d0b94c24c1c8a844f5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_should_use.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_stack.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_stack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdddaa48ac1e7cee812e18d16b73d383cf5b0f6c Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/tf_stack.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/traceback_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/traceback_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99767dadbc40494e39a8c2351958a29473c2b3ba Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/traceback_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e62f0c4f574fd456b2fff5fcc500ba335f977d8 Binary files 
/dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/type_annotations.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/variable_utils.cpython-310.pyc b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/variable_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed73b62fc16cf41867ce1805de54db9b835fa795 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/__pycache__/variable_utils.cpython-310.pyc differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1402d60148afeb9f28c35c15f12c974d31be0322 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi @@ -0,0 +1,25 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class CheckpointReader: + def __init__(self, arg0: str) -> None: ... 
+ @classmethod + def CheckpointReader_GetTensor(cls, arg0: CheckpointReader, arg1: str) -> object: ... + def _GetVariableToDataTypeMap(self, *args, **kwargs) -> Any: ... + def _HasTensor(self, arg0: str) -> bool: ... + def debug_string(self) -> bytes: ... + def get_variable_to_shape_map(self, *args, **kwargs) -> Any: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so new file mode 100644 index 0000000000000000000000000000000000000000..370db54a8ff77c6695c4993be403d976570f96c4 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cd6c39347b6d2870cfa313148e8e33999a82d0cd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.pyi @@ -0,0 +1,17 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +def enable(arg0: bool) -> None: ... +def is_enabled() -> bool: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.so new file mode 100644 index 0000000000000000000000000000000000000000..d883a427cbe6ab078a1c1d0f964518bb1dab4d5e Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_determinism.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bb54143b2887435e7c941b72962787748728749e --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TryFindKernelClass(arg0: str) -> bytes: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so new file mode 100644 index 0000000000000000000000000000000000000000..611469f3be1dcb1f9dee52525a941cc51d6311f1 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8ea01514027cfa4a50933e819051af34b85c0af2 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def FlattenDictItems(arg0: object) -> object: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.so new file mode 100644 index 0000000000000000000000000000000000000000..a74a07b9839d6d03c206972c750b2a12b7dca7dd Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ba10303b2f04406b888af65e4b76e59b6525cf05 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import overload + +class StatSummarizer: + @overload + def __init__(self, arg0: str) -> None: ... + @overload + def __init__(self) -> None: ... + def GetOutputString(self) -> str: ... + def PrintStepStats(self) -> None: ... + def ProcessStepStats(self, arg0) -> None: ... + def ProcessStepStatsStr(self, arg0: str) -> None: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tensor_float_32_execution.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tensor_float_32_execution.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cd6c39347b6d2870cfa313148e8e33999a82d0cd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tensor_float_32_execution.pyi @@ -0,0 +1,17 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def enable(arg0: bool) -> None: ... +def is_enabled() -> bool: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.so new file mode 100644 index 0000000000000000000000000000000000000000..c0041f58ccef46860e17a2bc9ea8fc5625bc6d19 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0de83df042d914119b7ad1167cdc3e0e508c2958 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TransformGraphWithStringInputs(arg0: object, arg1: object, arg2: object, arg3: object) -> bytes: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.so new file mode 100644 index 0000000000000000000000000000000000000000..dddbb75d722d94aca9a135d6459a8be528ba9b1a Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_utils.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..078311f5d1c9ec11e8e826e750c7bc62b126b7ec --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_utils.pyi @@ -0,0 +1,35 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def AssertSameStructure(arg0: object, arg1: object, arg2: bool, arg3: bool) -> bool: ... +def AssertSameStructureForData(arg0: object, arg1: object, arg2: bool) -> bool: ... +def Flatten(arg0: object, arg1: bool) -> object: ... +def FlattenForData(arg0: object) -> object: ... +def IsAttrs(arg0: object) -> bool: ... 
+def IsCompositeTensor(arg0: object) -> bool: ... +def IsDataTypeSupportedByOneDNNOnThisCPU(arg0) -> bool: ... +def IsMapping(arg0: object) -> bool: ... +def IsMappingView(arg0: object) -> bool: ... +def IsMutableMapping(arg0: object) -> bool: ... +def IsNamedtuple(arg0: object, arg1: bool) -> object: ... +def IsNested(arg0: object) -> bool: ... +def IsNestedForData(arg0: object) -> bool: ... +def IsNestedOrComposite(arg0: object) -> bool: ... +def IsResourceVariable(arg0: object) -> bool: ... +def IsTensor(arg0: object) -> bool: ... +def IsTypeSpec(arg0: object) -> bool: ... +def IsVariable(arg0: object) -> bool: ... +def RegisterPyObject(arg0: object, arg1: object) -> object: ... +def SameNamedtuples(arg0: object, arg1: object) -> object: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cc906680cbc7056c556442f42f9e918beb303b72 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.pyi @@ -0,0 +1,64 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from typing import Iterator + +from typing import overload + +class GraphDebugInfoBuilder: + def __init__(self) -> None: ... + def AccumulateStackTrace(self, function: str, op: str, trace) -> None: ... + def AppendGraphDebugInfo(self, prefix: str, debug_info: bytes) -> None: ... + def Build(self) -> bytes: ... + +class PyBindFileSet: + def __init__(self) -> None: ... + def update_to(self, arg0: set) -> None: ... + +class PyBindSourceMap: + def __init__(self) -> None: ... + def update_to(self, arg0: tuple) -> None: ... + +class StackFrame: + def __init__(self, *args, **kwargs) -> None: ... + def __eq__(self, arg0: StackFrame) -> bool: ... + def __getitem__(self, arg0: object) -> object: ... + def __hash__(self) -> int: ... + def __iter__(self) -> Iterator: ... + def __len__(self) -> int: ... + def __ne__(self, arg0: StackFrame) -> bool: ... + @property + def filename(self) -> str: ... + @property + def line(self) -> str: ... + @property + def lineno(self) -> int: ... + @property + def name(self) -> str: ... + +class StackTrace: + def __init__(self, *args, **kwargs) -> None: ... + def get_user_frames(self) -> StackTrace: ... + def last_user_frame(self) -> StackFrame: ... + def __eq__(self, arg0: StackTrace) -> bool: ... + @overload + def __getitem__(self, arg0: int) -> StackFrame: ... + @overload + def __getitem__(self, arg0: slice) -> StackTrace: ... + def __hash__(self) -> int: ... + def __len__(self) -> int: ... + +def LoadTracesFromDebugInfo(debug_info_proto: bytes) -> dict[str,StackTrace]: ... +def extract_stack(source_map: PyBindSourceMap, file_set: PyBindFileSet, stacklevel: int = ...) -> StackTrace: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.so new file mode 100644 index 0000000000000000000000000000000000000000..a4d49c7a83185d77e8096d46cc18ae17029aa632 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/_tf_stack.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/all_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/all_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b2d2ab5a6735e257b5c5f6697896e4ed33db0da0 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/all_util.py @@ -0,0 +1,117 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Generate __all__ from a module docstring.""" +import re as _re +import sys as _sys + +from tensorflow.python.util import tf_inspect as _tf_inspect + + +_reference_pattern = _re.compile(r'^@@(\w+)$', flags=_re.MULTILINE) + + +def make_all(module_name, doc_string_modules=None): + """Generates `__all__` from the docstring of one or more modules. 
+ + Usage: `make_all(__name__)` or + `make_all(__name__, [sys.modules(__name__), other_module])`. The doc string + modules must each a docstring, and `__all__` will contain all symbols with + `@@` references, where that symbol currently exists in the module named + `module_name`. + + Args: + module_name: The name of the module (usually `__name__`). + doc_string_modules: a list of modules from which to take docstring. + If None, then a list containing only the module named `module_name` is used. + + Returns: + A list suitable for use as `__all__`. + """ + if doc_string_modules is None: + doc_string_modules = [_sys.modules[module_name]] + cur_members = set( + name for name, _ in _tf_inspect.getmembers(_sys.modules[module_name])) + + results = set() + for doc_module in doc_string_modules: + results.update([m.group(1) + for m in _reference_pattern.finditer(doc_module.__doc__) + if m.group(1) in cur_members]) + return list(results) + +# Hidden attributes are attributes that have been hidden by +# `remove_undocumented`. They can be re-instated by `reveal_undocumented`. +# This maps symbol names to a tuple, containing: +# (module object, attribute value) +_HIDDEN_ATTRIBUTES = {} + + +def reveal_undocumented(symbol_name, target_module=None): + """Reveals a symbol that was previously removed by `remove_undocumented`. + + This should be used by tensorflow internal tests only. It explicitly + defeats the encapsulation afforded by `remove_undocumented`. + + It throws an exception when the symbol was not hidden in the first place. + + Args: + symbol_name: a string representing the full absolute path of the symbol. + target_module: if specified, the module in which to restore the symbol. 
+ """ + if symbol_name not in _HIDDEN_ATTRIBUTES: + raise LookupError('Symbol %s is not a hidden symbol' % symbol_name) + symbol_basename = symbol_name.split('.')[-1] + (original_module, attr_value) = _HIDDEN_ATTRIBUTES[symbol_name] + if not target_module: target_module = original_module + setattr(target_module, symbol_basename, attr_value) + + +def remove_undocumented(module_name, allowed_exception_list=None, + doc_string_modules=None): + """Removes symbols in a module that are not referenced by a docstring. + + Args: + module_name: the name of the module (usually `__name__`). + allowed_exception_list: a list of names that should not be removed. + doc_string_modules: a list of modules from which to take the docstrings. + If None, then a list containing only the module named `module_name` is used. + + Furthermore, if a symbol previously added with `add_to_global_allowlist`, + then it will always be allowed. This is useful for internal tests. + + Returns: + None + """ + current_symbols = set(dir(_sys.modules[module_name])) + should_have = make_all(module_name, doc_string_modules) + should_have += allowed_exception_list or [] + extra_symbols = current_symbols - set(should_have) + target_module = _sys.modules[module_name] + for extra_symbol in extra_symbols: + # Skip over __file__, etc. Also preserves internal symbols. + if extra_symbol.startswith('_'): continue + fully_qualified_name = module_name + '.' 
+ extra_symbol + _HIDDEN_ATTRIBUTES[fully_qualified_name] = (target_module, + getattr(target_module, + extra_symbol)) + delattr(target_module, extra_symbol) + + +__all__ = [ + 'make_all', + 'remove_undocumented', + 'reveal_undocumented', +] diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/compat.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..9d22d659561a69034d13a449485c6412fef2c5a8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/compat.py @@ -0,0 +1,226 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Compatibility functions. + +The `tf.compat` module contains two sets of compatibility functions. + +## Tensorflow 1.x and 2.x APIs + +The `compat.v1` and `compat.v2` submodules provide a complete copy of both the +`v1` and `v2` APIs for backwards and forwards compatibility across TensorFlow +versions 1.x and 2.x. See the +[migration guide](https://www.tensorflow.org/guide/migrate) for details. 
+ +## Utilities for writing compatible code + +Aside from the `compat.v1` and `compat.v2` submodules, `tf.compat` also contains +a set of helper functions for writing code that works in both: + +* TensorFlow 1.x and 2.x +* Python 2 and 3 + + +## Type collections + +The compatibility module also provides the following aliases for common +sets of python types: + +* `bytes_or_text_types` +* `complex_types` +* `integral_types` +* `real_types` + +API docstring: tensorflow.compat +""" + +import codecs +import collections.abc as collections_abc # pylint: disable=unused-import +import numbers as _numbers + +import numpy as _np + +from tensorflow.python.util.tf_export import tf_export + + +def as_bytes(bytes_or_text, encoding='utf-8'): + """Converts `bytearray`, `bytes`, or unicode python input types to `bytes`. + + Uses utf-8 encoding for text by default. + + Args: + bytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for encoding unicode. + + Returns: + A `bytes` object. + + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + # Validate encoding, a LookupError will be raised if invalid. + encoding = codecs.lookup(encoding).name + if isinstance(bytes_or_text, bytearray): + return bytes(bytes_or_text) + elif isinstance(bytes_or_text, str): + return bytes_or_text.encode(encoding) + elif isinstance(bytes_or_text, bytes): + return bytes_or_text + else: + raise TypeError('Expected binary or unicode string, got %r' % + (bytes_or_text,)) + + +def as_text(bytes_or_text, encoding='utf-8'): + """Converts any string-like python input types to unicode. + + Returns the input as a unicode string. Uses utf-8 encoding for text + by default. + + Args: + bytes_or_text: A `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for decoding unicode. + + Returns: + A `unicode` (Python 2) or `str` (Python 3) object. 
+ + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + # Validate encoding, a LookupError will be raised if invalid. + encoding = codecs.lookup(encoding).name + if isinstance(bytes_or_text, str): + return bytes_or_text + elif isinstance(bytes_or_text, bytes): + return bytes_or_text.decode(encoding) + else: + raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text) + + +def as_str(bytes_or_text, encoding='utf-8'): + """Acts as an alias for the `as_text` function.. + + Args: + bytes_or_text: The input value to be converted. A bytes or unicode object. + encoding: Optional string. The encoding to use if bytes_or_text is a bytes + object. Defaults to 'utf-8'. + + Returns: + A unicode string. + + Raises: + TypeError: If bytes_or_text is not a bytes or unicode object. + UnicodeDecodeError: If bytes_or_text is a bytes object and cannot be + decoded using the specified encoding. + """ + return as_text(bytes_or_text, encoding) + +tf_export('compat.as_text')(as_text) +tf_export('compat.as_bytes')(as_bytes) +tf_export('compat.as_str')(as_str) + + +@tf_export('compat.as_str_any') +def as_str_any(value, encoding='utf-8'): + """Converts input to `str` type. + + Uses `str(value)`, except for `bytes` typed inputs, which are converted + using `as_str`. + + Args: + value: A object that can be converted to `str`. + encoding: Encoding for `bytes` typed inputs. + + Returns: + A `str` object. + """ + if isinstance(value, bytes): + return as_str(value, encoding=encoding) + else: + return str(value) + + +@tf_export('compat.path_to_str') +def path_to_str(path): + r"""Converts input which is a `PathLike` object to `str` type. + + Converts from any python constant representation of a `PathLike` object to + a string. If the input is not a `PathLike` object, simply returns the input. + + Args: + path: An object that can be converted to path representation. + + Returns: + A `str` object. 
+ + Usage: + In case a simplified `str` version of the path is needed from an + `os.PathLike` object. + + Examples: + ```python + $ tf.compat.path_to_str('C:\XYZ\tensorflow\./.././tensorflow') + 'C:\XYZ\tensorflow\./.././tensorflow' # Windows OS + $ tf.compat.path_to_str(Path('C:\XYZ\tensorflow\./.././tensorflow')) + 'C:\XYZ\tensorflow\..\tensorflow' # Windows OS + $ tf.compat.path_to_str(Path('./corpus')) + 'corpus' # Linux OS + $ tf.compat.path_to_str('./.././Corpus') + './.././Corpus' # Linux OS + $ tf.compat.path_to_str(Path('./.././Corpus')) + '../Corpus' # Linux OS + $ tf.compat.path_to_str(Path('./..////../')) + '../..' # Linux OS + + ``` + """ + if hasattr(path, '__fspath__'): + path = as_str_any(path.__fspath__()) + return path + + +def path_to_bytes(path): + r"""Converts input which is a `PathLike` object to `bytes`. + + Converts from any python constant representation of a `PathLike` object + or `str` to bytes. + + Args: + path: An object that can be converted to path representation. + + Returns: + A `bytes` object. + + Usage: + In case a simplified `bytes` version of the path is needed from an + `os.PathLike` object. + """ + if hasattr(path, '__fspath__'): + path = path.__fspath__() + return as_bytes(path) + + +# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we +# need to check them specifically. The same goes from Real and Complex. +integral_types = (_numbers.Integral, _np.integer) +tf_export('compat.integral_types').export_constant(__name__, 'integral_types') +real_types = (_numbers.Real, _np.integer, _np.floating) +tf_export('compat.real_types').export_constant(__name__, 'real_types') +complex_types = (_numbers.Complex, _np.number) +tf_export('compat.complex_types').export_constant(__name__, 'complex_types') + +# Either bytes or text. 
+bytes_or_text_types = (bytes, str) +tf_export('compat.bytes_or_text_types').export_constant(__name__, + 'bytes_or_text_types') diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..1da4e463604b5fddd474e0221af3643dc4bc96aa --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py @@ -0,0 +1,120 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Protocol class for custom tf.nest support.""" + +import typing +from typing import Protocol + + +@typing.runtime_checkable +class CustomNestProtocol(Protocol): + """Protocol for adding custom tf.nest support in user-defined classes. + + User classes should implement the two methods defined in this protocol in + order to be supported by nest functions. + - `__tf_flatten__` for generating the flattened components and the metadata + of the current object. + - `__tf_unflatten__` for creating a new object based on the input metadata + and the components. + See the method doc for details. 
+ + In terms of support level, classes implementing this protocol + - are supported by tf.nest and tf.data functions. + - have limited support from tf.function, which requires writing a custom + TraceType subclass to be used as the input or output of a tf.function. + - are NOT supported by SavedModel. + + Code Examples: + + >>> import dataclasses + >>> @dataclasses.dataclass + ... class MaskedTensor: + ... mask: bool + ... value: tf.Tensor + ... + ... def __tf_flatten__(self): + ... metadata = (self.mask,) # static config. + ... components = (self.value,) # dynamic values. + ... return metadata, components + ... + ... @classmethod + ... def __tf_unflatten__(cls, metadata, components): + ... mask = metadata[0] + ... value = components[0] + ... return MaskedTensor(mask=mask, value=value) + ... + >>> mt = MaskedTensor(mask=True, value=tf.constant([1])) + >>> mt + MaskedTensor(mask=True, value=) + >>> tf.nest.is_nested(mt) + True + >>> mt2 = MaskedTensor(mask=False, value=tf.constant([2])) + >>> tf.nest.assert_same_structure(mt, mt2) + + >>> leaves = tf.nest.flatten(mt) + >>> leaves + [] + + >>> mt3 = tf.nest.pack_sequence_as(mt, leaves) + >>> mt3 + MaskedTensor(mask=True, value=) + >>> bool(mt == mt3) + True + + >>> tf.nest.map_structure(lambda x: x * 2, mt) + MaskedTensor(mask=True, value=) + + More examples are available in the unit tests (nest_test.py). + """ + + def __tf_flatten__(self): + """Flatten current object into (metadata, components). + + Returns: + A `tuple` of (metadata, components), where + - metadata is a custom Python object that stands for the static config + of the current object, which is supposed to be fixed and not affected + by data transformation. + - components is a `tuple` that contains the modifiable fields of the + current object. + + Implementation Note: + - This method should not invoke any TensorFlow ops. + - This method only needs to flatten the current level. 
If current object has + an attribute that also need custom flattening, nest functions (such as + `nest.flatten`) will utilize this method to do recursive flattening. + - Components must ba a `tuple`, not a `list` + """ + + @classmethod + def __tf_unflatten__(cls, metadata, components): + """Create a user-defined object from (metadata, components). + + Args: + metadata: a custom Python objet that stands for the static config for + reconstructing a new object of the current class. + components: a `tuple` that contains the dynamic data fields of the current + class, for object reconstruction. + + Returns: + The user-defined object, with the same class of the current object. + + Implementation Note: + - This method should not invoke any TensorFlow ops. + - This method only needs to unflatten the current level. If the object has + an attribute that also need custom unflattening, nest functions will + utilize this method to do recursive unflattening. + """ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/decorator_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/decorator_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ba87878cb1482b37b5bb950e626719f29c39953 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/decorator_utils.py @@ -0,0 +1,203 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility functions for writing decorators (which modify docstrings).""" +import sys + + +def get_qualified_name(function): + # Python 3 + if hasattr(function, '__qualname__'): + return function.__qualname__ + + # Python 2 + if hasattr(function, 'im_class'): + return function.im_class.__name__ + '.' + function.__name__ + return function.__name__ + + +def _normalize_docstring(docstring): + """Normalizes the docstring. + + Replaces tabs with spaces, removes leading and trailing blanks lines, and + removes any indentation. + + Copied from PEP-257: + https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation + + Args: + docstring: the docstring to normalize + + Returns: + The normalized docstring + """ + if not docstring: + return '' + # Convert tabs to spaces (following the normal Python rules) + # and split into a list of lines: + lines = docstring.expandtabs().splitlines() + # Determine minimum indentation (first line doesn't count): + # (we use sys.maxsize because sys.maxint doesn't exist in Python 3) + indent = sys.maxsize + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < sys.maxsize: + for line in lines[1:]: + trimmed.append(line[indent:].rstrip()) + # Strip off trailing and leading blank lines: + while trimmed and not trimmed[-1]: + trimmed.pop() + while trimmed and not trimmed[0]: + trimmed.pop(0) + # Return a single string: + return '\n'.join(trimmed) + + +def add_notice_to_docstring(doc, + instructions, + no_doc_str, + suffix_str, + notice, + notice_type='Warning'): + """Adds a deprecation notice to a docstring. + + Args: + doc: The original docstring. 
+ instructions: A string, describing how to fix the problem. + no_doc_str: The default value to use for `doc` if `doc` is empty. + suffix_str: Is added to the end of the first line. + notice: A list of strings. The main notice warning body. + notice_type: The type of notice to use. Should be one of `[Caution, + Deprecated, Important, Note, Warning]` + + Returns: + A new docstring, with the notice attached. + + Raises: + ValueError: If `notice` is empty. + """ + allowed_notice_types = ['Deprecated', 'Warning', 'Caution', 'Important', + 'Note'] + if notice_type not in allowed_notice_types: + raise ValueError( + f'Unrecognized notice type. Should be one of: {allowed_notice_types}') + + if not doc: + lines = [no_doc_str] + else: + lines = _normalize_docstring(doc).splitlines() + lines[0] += ' ' + suffix_str + + if not notice: + raise ValueError('The `notice` arg must not be empty.') + + notice[0] = f'{notice_type}: {notice[0]}' + notice = [''] + notice + ([instructions] if instructions else []) + + if len(lines) > 1: + # Make sure that we keep our distance from the main body + if lines[1].strip(): + notice.append('') + + lines[1:1] = notice + else: + lines += notice + + return '\n'.join(lines) + + +def validate_callable(func, decorator_name): + if not hasattr(func, '__call__'): + raise ValueError( + '%s is not a function. If this is a property, make sure' + ' @property appears before @%s in your source code:' + '\n\n@property\n@%s\ndef method(...)' % ( + func, decorator_name, decorator_name)) + + +class classproperty(object): # pylint: disable=invalid-name + """Class property decorator. + + Example usage: + + class MyClass(object): + + @classproperty + def value(cls): + return '123' + + > print MyClass.value + 123 + """ + + def __init__(self, func): + self._func = func + + def __get__(self, owner_self, owner_cls): + return self._func(owner_cls) + + +class _CachedClassProperty(object): + """Cached class property decorator. 
+ + Transforms a class method into a property whose value is computed once + and then cached as a normal attribute for the life of the class. Example + usage: + + >>> class MyClass(object): + ... @cached_classproperty + ... def value(cls): + ... print("Computing value") + ... return '' % cls.__name__ + >>> class MySubclass(MyClass): + ... pass + >>> MyClass.value + Computing value + '' + >>> MyClass.value # uses cached value + '' + >>> MySubclass.value + Computing value + '' + + This decorator is similar to `functools.cached_property`, but it adds a + property to the class, not to individual instances. + """ + + def __init__(self, func): + self._func = func + self._cache = {} + + def __get__(self, obj, objtype): + if objtype not in self._cache: + self._cache[objtype] = self._func(objtype) + return self._cache[objtype] + + def __set__(self, obj, value): + raise AttributeError('property %s is read-only' % self._func.__name__) + + def __delete__(self, obj): + raise AttributeError('property %s is read-only' % self._func.__name__) + + +def cached_classproperty(func): + return _CachedClassProperty(func) + + +cached_classproperty.__doc__ = _CachedClassProperty.__doc__ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module.py new file mode 100644 index 0000000000000000000000000000000000000000..ad26c5c97e57a4b0765b4e44b2e245a9d77b8944 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module.py @@ -0,0 +1,24 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A deprecated module. + +For testing `deprecation.deprecate_moved_module`. +""" + +from tensorflow.python.util import deprecated_module_new +from tensorflow.python.util import deprecation + +__getattr__ = deprecation.deprecate_moved_module( + __name__, deprecated_module_new, "2.9") diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module_new.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module_new.py new file mode 100644 index 0000000000000000000000000000000000000000..762c4b022cf69f6f60f43a3b83d8a1725ab18694 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecated_module_new.py @@ -0,0 +1,22 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""A module to replace deprecated_module. + +For testing `deprecation.deprecate_moved_module`. +""" + + +def a(): + return 1 diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..36a62bddedc3f5f94a5de92bef929a194ae7e430 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py @@ -0,0 +1,763 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensor utility functions.""" +import collections +import functools +import inspect +import re + +from tensorflow.python.framework import strict_mode +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import decorator_utils +from tensorflow.python.util import is_in_graph_mode +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect +from tensorflow.tools.docs import doc_controls + +# Allow deprecation warnings to be silenced temporarily with a context manager. 
+_PRINT_DEPRECATION_WARNINGS = True + +# Remember which deprecation warnings have been printed already. +_PRINTED_WARNING = {} + + +class DeprecatedNamesAlreadySetError(Exception): + """Raised when setting deprecated names multiple times for the same symbol.""" + + +def _log_deprecation(msg, *args, **kwargs): + """Raises errors for deprecated methods if in strict mode, warns otherwise.""" + if strict_mode.STRICT_MODE: + logging.error(msg, *args, **kwargs) + raise RuntimeError( + 'This behavior has been deprecated, which raises an error in strict' + ' mode.' + ) + else: + logging.warning(msg, *args, **kwargs) + + +def _add_deprecated_function_notice_to_docstring(doc, date, instructions): + """Adds a deprecation notice to a docstring for deprecated functions.""" + main_text = [ + 'THIS FUNCTION IS DEPRECATED. It will be removed %s.' + % ('in a future version' if date is None else ('after %s' % date)) + ] + if instructions: + main_text.append('Instructions for updating:') + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION', + '(deprecated)', + main_text, + notice_type='Deprecated') + + +def _add_deprecated_arg_notice_to_docstring(doc, date, instructions, + deprecated_names): + """Adds a deprecation notice to a docstring for deprecated arguments.""" + + deprecation_string = ', '.join(sorted(deprecated_names)) + + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION ARGUMENTS', + '(deprecated arguments)', [ + 'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. ' + 'They will be removed %s.' 
% + (deprecation_string, 'in a future version' if date is None else + ('after %s' % date)), 'Instructions for updating:' + ], + notice_type='Deprecated') + + +def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions, + deprecated_name_value_dict): + """Adds a deprecation notice to a docstring for deprecated arguments.""" + + deprecation_string = ', '.join( + '%s=%r' % (key, value) + for key, value in sorted(deprecated_name_value_dict.items())) + + when = 'in a future version' if date is None else ('after %s' % date) + + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION ARGUMENT VALUES', + '(deprecated argument values)', [ + 'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. ' + 'They will be removed %s.' % + (deprecation_string, when), 'Instructions for updating:' + ], + notice_type='Deprecated') + + +def _validate_deprecation_args(date, instructions): + if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date): + raise ValueError(f'Date must be in format YYYY-MM-DD. Received: {date}') + if not instructions: + raise ValueError( + 'Don\'t deprecate things without conversion instructions! Specify ' + 'the `instructions` argument.') + + +def _call_location(outer=False): + """Returns call location given level up from current call.""" + # Two up: <_call_location>, <_call_location's caller> + # tf_inspect is not required here. Please ignore the lint warning by adding + # DISABLE_IMPORT_INSPECT_CHECK=TRUE to your cl description. Using it caused + # test timeouts (b/189384061). + f = inspect.currentframe().f_back.f_back + parent = f and f.f_back + if outer and parent is not None: + f = parent + return '{}:{}'.format(f.f_code.co_filename, f.f_lineno) + + +def _safe_eq(a, b): + if a is None or b is None: + return a is None and b is None + return a == b + + +def _wrap_decorator(wrapped_function, decorator_name): + """Indicate that one function wraps another. 
+ + This decorator wraps a function using `tf_decorator.make_decorator` + so that doc generation scripts can pick up original function + signature. + It would be better to use @functools.wrap decorator, but it would + not update function signature to match wrapped function in Python 2. + + Args: + wrapped_function: The function that decorated function wraps. + decorator_name: The name of the decorator. + + Returns: + Function that accepts wrapper function as an argument and returns + `TFDecorator` instance. + """ + + def wrapper(wrapper_func): + return tf_decorator.make_decorator(wrapped_function, wrapper_func, + decorator_name) + + return wrapper + + +def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True): + """Deprecate a symbol in favor of a new name with identical semantics. + + This function is meant to be used when defining a backwards-compatibility + alias for a symbol which has been moved. For example: + + module1.py: + ```python + class NewNameForClass: pass + ``` + + module2.py: + ```python + import module1 + + DeprecatedNameForClass = deprecated_alias( + deprecated_name='module2.DeprecatedNameForClass', + name='module1.NewNameForClass', + func_or_class=module1.NewNameForClass) + ``` + + This function works for classes and functions. + + For classes, it creates a new class which is functionally identical (it + inherits from the original, and overrides its constructor), but which prints + a deprecation warning when an instance is created. It also adds a deprecation + notice to the class' docstring. + + For functions, it returns a function wrapped by `tf_decorator.make_decorator`. + That function prints a warning when used, and has a deprecation notice in its + docstring. 
This is more or less equivalent (the deprecation warning has + slightly different text) to writing: + + ```python + @deprecated + def deprecated_alias(original_args): + real_function(original_args) + ``` + + Args: + deprecated_name: The name of the symbol that is being deprecated, to be used + in the warning message. This should be its fully qualified name to avoid + confusion. + name: The name of the symbol that is to be used instead of the deprecated + name. This should be a fully qualified name to avoid confusion. + func_or_class: The (non-deprecated) class or function for which a deprecated + alias should be created. + warn_once: If True (the default), only print a deprecation warning the first + time this function is used, or the class is instantiated. + + Returns: + A wrapped version of `func_or_class` which prints a deprecation warning on + use and has a modified docstring. + """ + if tf_inspect.isclass(func_or_class): + + # Make a new class with __init__ wrapped in a warning. + class _NewClass(func_or_class): # pylint: disable=missing-docstring + __doc__ = decorator_utils.add_notice_to_docstring( + func_or_class.__doc__, + 'Please use %s instead.' % name, + 'DEPRECATED CLASS', + '(deprecated)', [('THIS CLASS IS DEPRECATED. ' + 'It will be removed in a future version. ')], + notice_type='Deprecated') + __name__ = func_or_class.__name__ + __module__ = _call_location(outer=True) + + @_wrap_decorator(func_or_class.__init__, 'deprecated_alias') + def __init__(self, *args, **kwargs): + if hasattr(_NewClass.__init__, '__func__'): + # Python 2 + _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__ + else: + # Python 3 + _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__ + + if _PRINT_DEPRECATION_WARNINGS: + # We're making the alias as we speak. The original may have other + # aliases, so we cannot use it to check for whether it's already been + # warned about. 
+ if _NewClass.__init__ not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[_NewClass.__init__] = True + _log_deprecation( + 'From %s: The name %s is deprecated. Please use %s instead.\n', + _call_location(), deprecated_name, name) + super(_NewClass, self).__init__(*args, **kwargs) + + return _NewClass + else: + decorator_utils.validate_callable(func_or_class, 'deprecated') + + # Make a wrapper for the original + @functools.wraps(func_or_class) + def new_func(*args, **kwargs): # pylint: disable=missing-docstring + if _PRINT_DEPRECATION_WARNINGS: + # We're making the alias as we speak. The original may have other + # aliases, so we cannot use it to check for whether it's already been + # warned about. + if new_func not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[new_func] = True + _log_deprecation( + 'From %s: The name %s is deprecated. Please use %s instead.\n', + _call_location(), deprecated_name, name) + return func_or_class(*args, **kwargs) + + return tf_decorator.make_decorator( + func_or_class, new_func, 'deprecated', + _add_deprecated_function_notice_to_docstring( + func_or_class.__doc__, None, 'Please use %s instead.' % name)) + + +def deprecated_endpoints(*args): + """Decorator for marking endpoints deprecated. + + This decorator does not print deprecation messages. + TODO(annarev): eventually start printing deprecation warnings when + @deprecation_endpoints decorator is added. + + Args: + *args: Deprecated endpoint names. + + Returns: + A function that takes symbol as an argument and adds + _tf_deprecated_api_names to that symbol. + _tf_deprecated_api_names would be set to a list of deprecated + endpoint names for the symbol. + """ + + def deprecated_wrapper(func): + # pylint: disable=protected-access + if '_tf_deprecated_api_names' in func.__dict__: + raise DeprecatedNamesAlreadySetError( + f'Cannot set deprecated names for {func.__name__} to {args}. 
' + 'Deprecated names are already set to ' + f'{func._tf_deprecated_api_names}.') + func._tf_deprecated_api_names = args + # pylint: disable=protected-access + return func + + return deprecated_wrapper + + +def deprecated(date, instructions, warn_once=True): + """Decorator for marking functions or methods deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called. It has the following format: + + (from ) is deprecated and will be removed after . + Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. + will include the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated)' is appended + to the first line of the docstring and a deprecation notice is prepended + to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None. + instructions: String. Instructions on how to update code using the + deprecated function. + warn_once: Boolean. Set to `True` to warn only the first time the decorated + function is called. Otherwise, every call will log a warning. + + Returns: + Decorated function or method. + + Raises: + ValueError: If date is not None or in ISO 8601 format, or instructions are + empty. + """ + _validate_deprecation_args(date, instructions) + + def deprecated_wrapper(func_or_class): + """Deprecation wrapper.""" + if isinstance(func_or_class, type): + # If a class is deprecated, you actually want to wrap the constructor. + cls = func_or_class + if cls.__new__ is object.__new__: + # If a class defaults to its parent's constructor, wrap that instead. + func = cls.__init__ + constructor_name = '__init__' + decorators, _ = tf_decorator.unwrap(func) + for decorator in decorators: + if decorator.decorator_name == 'deprecated': + # If the parent is already deprecated, there's nothing to do. 
+ return cls + else: + func = cls.__new__ + constructor_name = '__new__' + + else: + cls = None + constructor_name = None + func = func_or_class + + decorator_utils.validate_callable(func, 'deprecated') + + @_wrap_decorator(func, 'deprecated') + def new_func(*args, **kwargs): # pylint: disable=missing-docstring + if _PRINT_DEPRECATION_WARNINGS: + if func not in _PRINTED_WARNING and cls not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[func] = True + if cls: + _PRINTED_WARNING[cls] = True + _log_deprecation( + 'From %s: %s (from %s) is deprecated and will be removed %s.\n' + 'Instructions for updating:\n%s', _call_location(), + decorator_utils.get_qualified_name(func), + func_or_class.__module__, + 'in a future version' if date is None else ('after %s' % date), + instructions) + return func(*args, **kwargs) + + doc_controls.set_deprecated(new_func) + new_func = tf_decorator.make_decorator( + func, new_func, 'deprecated', + _add_deprecated_function_notice_to_docstring(func.__doc__, date, + instructions)) + new_func.__signature__ = inspect.signature(func) + + if cls is None: + return new_func + else: + # Insert the wrapped function as the constructor + setattr(cls, constructor_name, new_func) + + # And update the docstring of the class. + cls.__doc__ = _add_deprecated_function_notice_to_docstring( + cls.__doc__, date, instructions) + + return cls + + return deprecated_wrapper + + +DeprecatedArgSpec = collections.namedtuple( + 'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value']) + + +def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples, + **kwargs): + """Decorator for marking specific function arguments as deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called with the deprecated argument. It has the following format: + + Calling (from ) with is deprecated and will be + removed after . Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. 
+ includes the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated arguments)' is + appended to the first line of the docstring and a deprecation notice is + prepended to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None. + instructions: String. Instructions on how to update code using the + deprecated function. + *deprecated_arg_names_or_tuples: String or 2-Tuple (String, ok_val). The + string is the deprecated argument name. Optionally, an ok-value may be + provided. If the user provided argument equals this value, the warning is + suppressed. + **kwargs: If `warn_once=False` is passed, every call with a deprecated + argument will log a warning. The default behavior is to only warn the + first time the function is called with any given deprecated argument. All + other kwargs raise `ValueError`. + + Returns: + Decorated function or method. + + Raises: + ValueError: If date is not None or in ISO 8601 format, instructions are + empty, the deprecated arguments are not present in the function + signature, the second element of a deprecated_tuple is not a + list, or if a kwarg other than `warn_once` is passed. 
+ """ + _validate_deprecation_args(date, instructions) + if not deprecated_arg_names_or_tuples: + raise ValueError('Specify which argument is deprecated.') + if kwargs and list(kwargs.keys()) != ['warn_once']: + kwargs.pop('warn_once', None) + raise ValueError(f'Illegal argument passed to deprecated_args: {kwargs}') + warn_once = kwargs.get('warn_once', True) + + def _get_arg_names_to_ok_vals(): + """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position.""" + d = {} + for name_or_tuple in deprecated_arg_names_or_tuples: + if isinstance(name_or_tuple, tuple): + d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1]) + else: + d[name_or_tuple] = DeprecatedArgSpec(-1, False, None) + return d + + def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec): + """Builds a dictionary from deprecated arguments to their spec. + + Returned dict is keyed by argument name. + Each value is a DeprecatedArgSpec with the following fields: + position: The zero-based argument position of the argument + within the signature. None if the argument isn't found in + the signature. + ok_values: Values of this argument for which warning will be + suppressed. + + Args: + names_to_ok_vals: dict from string arg_name to a list of values, possibly + empty, which should not elicit a warning. + arg_spec: Output from tf_inspect.getfullargspec on the called function. + + Returns: + Dictionary from arg_name to DeprecatedArgSpec. 
+ """ + # Extract argument list + arg_space = arg_spec.args + arg_spec.kwonlyargs + arg_name_to_pos = {name: pos for pos, name in enumerate(arg_space)} + deprecated_positional_args = {} + for arg_name, spec in iter(names_to_ok_vals.items()): + if arg_name in arg_name_to_pos: + pos = arg_name_to_pos[arg_name] + deprecated_positional_args[arg_name] = DeprecatedArgSpec( + pos, spec.has_ok_value, spec.ok_value) + return deprecated_positional_args + + deprecated_arg_names = _get_arg_names_to_ok_vals() + + def deprecated_wrapper(func): + """Deprecation decorator.""" + decorator_utils.validate_callable(func, 'deprecated_args') + + arg_spec = tf_inspect.getfullargspec(func) + deprecated_positions = _get_deprecated_positional_arguments( + deprecated_arg_names, arg_spec) + + is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names + is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names + + if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated + != len(deprecated_arg_names_or_tuples)): + known_args = ( + arg_spec.args + arg_spec.kwonlyargs + + [arg_spec.varargs, arg_spec.varkw]) + missing_args = [ + arg_name for arg_name in deprecated_arg_names + if arg_name not in known_args + ] + raise ValueError('The following deprecated arguments are not present ' + f'in the function signature: {missing_args}. ' + 'Expected arguments from the following list: ' + f'{known_args}.') + + def _same_value(a, b): + """A comparison operation that works for multiple object types. + + Returns True for two empty lists, two numeric values with the + same value, etc. + + Returns False for (pd.DataFrame, None), and other pairs which + should not be considered equivalent. + + Args: + a: value one of the comparison. + b: value two of the comparison. + + Returns: + A boolean indicating whether the two inputs are the same value + for the purposes of deprecation. 
+ """ + if a is b: + return True + try: + equality = a == b + if isinstance(equality, bool): + return equality + except TypeError: + return False + return False + + @functools.wraps(func) + def new_func(*args, **kwargs): + """Deprecation wrapper.""" + # TODO(apassos) figure out a way to have reasonable performance with + # deprecation warnings and eager mode. + if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS: + invalid_args = [] + named_args = tf_inspect.getcallargs(func, *args, **kwargs) + for arg_name, spec in iter(deprecated_positions.items()): + if (spec.position < len(args) and + not (spec.has_ok_value and + _same_value(named_args[arg_name], spec.ok_value))): + invalid_args.append(arg_name) + if is_varargs_deprecated and len(args) > len(arg_spec.args): + invalid_args.append(arg_spec.varargs) + if is_kwargs_deprecated and kwargs: + invalid_args.append(arg_spec.varkw) + for arg_name in deprecated_arg_names: + if (arg_name in kwargs and + not (deprecated_positions[arg_name].has_ok_value and + _same_value(named_args[arg_name], + deprecated_positions[arg_name].ok_value))): + invalid_args.append(arg_name) + for arg_name in invalid_args: + if (func, arg_name) not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[(func, arg_name)] = True + _log_deprecation( + 'From %s: calling %s (from %s) with %s is deprecated and will ' + 'be removed %s.\nInstructions for updating:\n%s', + _call_location(), decorator_utils.get_qualified_name(func), + func.__module__, arg_name, + 'in a future version' if date is None else ('after %s' % date), + instructions) + return func(*args, **kwargs) + + doc = _add_deprecated_arg_notice_to_docstring( + func.__doc__, date, instructions, sorted(deprecated_arg_names.keys())) + return tf_decorator.make_decorator(func, new_func, 'deprecated', doc) + + return deprecated_wrapper + + +def deprecated_arg_values(date, + instructions, + warn_once=True, + **deprecated_kwargs): + """Decorator for marking specific function 
argument values as deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called with the deprecated argument values. It has the following format: + + Calling (from ) with = is deprecated and + will be removed after . Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. + will include the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated arguments)' is + appended to the first line of the docstring and a deprecation notice is + prepended to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None + instructions: String. Instructions on how to update code using the + deprecated function. + warn_once: If `True`, warn only the first time this function is called with + deprecated argument values. Otherwise, every call (with a deprecated + argument value) will log a warning. + **deprecated_kwargs: The deprecated argument values. + + Returns: + Decorated function or method. + + Raises: + ValueError: If date is not None or in ISO 8601 format, or instructions are + empty. 
+ """ + _validate_deprecation_args(date, instructions) + if not deprecated_kwargs: + raise ValueError('Specify which argument values are deprecated.') + + def deprecated_wrapper(func): + """Deprecation decorator.""" + decorator_utils.validate_callable(func, 'deprecated_arg_values') + + @functools.wraps(func) + def new_func(*args, **kwargs): + """Deprecation wrapper.""" + if _PRINT_DEPRECATION_WARNINGS: + named_args = tf_inspect.getcallargs(func, *args, **kwargs) + for arg_name, arg_value in deprecated_kwargs.items(): + if arg_name in named_args and _safe_eq(named_args[arg_name], + arg_value): + if (func, arg_name) not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[(func, arg_name)] = True + _log_deprecation( + 'From %s: calling %s (from %s) with %s=%s is deprecated and ' + 'will be removed %s.\nInstructions for updating:\n%s', + _call_location(), decorator_utils.get_qualified_name(func), + func.__module__, arg_name, arg_value, + 'in a future version' if date is None else + ('after %s' % date), instructions) + return func(*args, **kwargs) + + doc = _add_deprecated_arg_value_notice_to_docstring(func.__doc__, date, + instructions, + deprecated_kwargs) + return tf_decorator.make_decorator(func, new_func, 'deprecated', doc) + + return deprecated_wrapper + + +def deprecated_argument_lookup(new_name, new_value, old_name, old_value): + """Looks up deprecated argument name and ensures both are not used. + + Args: + new_name: new name of argument + new_value: value of new argument (or None if not used) + old_name: old name of argument + old_value: value of old argument (or None if not used) + + Returns: + The effective argument that should be used. 
+ Raises: + ValueError: if new_value and old_value are both non-null + """ + if old_value is not None: + if new_value is not None: + raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.") + return old_value + return new_value + + +def rewrite_argument_docstring(old_doc, old_argument, new_argument): + return old_doc.replace('`%s`' % old_argument, + '`%s`' % new_argument).replace('%s:' % old_argument, + '%s:' % new_argument) + + +@tf_contextlib.contextmanager +def silence(): + """Temporarily silence deprecation warnings.""" + global _PRINT_DEPRECATION_WARNINGS + print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS + _PRINT_DEPRECATION_WARNINGS = False + yield + _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings + + +def deprecate_moved_module(deprecated_name, new_module, deletion_version): + """Logs a warning when a module that has been moved to a new location is used. + + Copy the following code into the old module: + + ``` + import deprecation + import new_module + + __getattr__ = deprecation.deprecate_moved_module( + __name__, new_module, "2.9") # adjust version number. + ``` + + Args: + deprecated_name: Name of old module. + new_module: Module to replace the old module. + deletion_version: Version of TensorFlow in which the old module will be + removed. + + Returns: + A function that logs a warning and returns the symbol from the new module. + Set this function as the module's `__getattr__`. + """ + + def getter(name): + if getter not in _PRINTED_WARNING and _PRINT_DEPRECATION_WARNINGS: + _PRINTED_WARNING[getter] = True + _log_deprecation( + 'Please fix your imports. Module %s has been moved to %s. The old ' + 'module will be deleted in version %s.', deprecated_name, + new_module.__name__, deletion_version) + return getattr(new_module, name) + + return getter + + +class HiddenTfApiAttribute(property): + """Hides a class attribute from the public API. 
+ + Attributes in public classes can be hidden from the API by having an '_' in + front of the name (e.g. ClassName._variables). This doesn't work when + attributes or methods are inherited from a parent class. To hide inherited + attributes, set their values to be `deprecation.hide_attribute_from_api`. + """ + + def __init__(self, deprecation_message): + + def raise_error(unused_self): + raise AttributeError(deprecation_message) + + super(HiddenTfApiAttribute, self).__init__(raise_error) + + +hide_attribute_from_api = HiddenTfApiAttribute # pylint: disable=invalid-name + +# TODO(kathywu): Remove once cl/246395236 is submitted. +HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.') diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..2605c2a17c7695896b0e44168cefc2c7ddbbe574 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py @@ -0,0 +1,1302 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Type-based dispatch for TensorFlow's Python APIs. 
+ +"Python APIs" refers to Python functions that have been exported with +`tf_export`, such as `tf.add` and `tf.linalg.matmul`; they are sometimes also +referred to as "ops". + +There are currently two dispatch systems for TensorFlow: + + * The "fallback dispatch" system calls an API's standard implementation first, + and only tries to perform dispatch if that standard implementation raises a + TypeError (or ValueError) exception. + + * The "type-based dispatch" system checks the types of the parameters passed + to an API, and performs dispatch if those types match any signatures that + have been registered for dispatch. + +The fallback dispatch system was the original dispatch system, but it was +somewhat brittle and had limitations, such as an inability to support dispatch +for some operations (like convert_to_tensor). We plan to remove the fallback +dispatch system in favor of the type-based dispatch system, once all users have +been switched over to use it. + +### Fallback Dispatch + +The fallback dispatch system is based on "operation dispatchers", which can be +used to override the behavior for TensorFlow ops when they are called with +otherwise unsupported argument types. In particular, when an operation is +called with arguments that would cause it to raise a TypeError, it falls back on +its registered operation dispatchers. If any registered dispatchers can handle +the arguments, then its result is returned. Otherwise, the original TypeError is +raised. + +### Type-based Dispatch + +The main interface for the type-based dispatch system is the `dispatch_for_api` +decorator, which overrides the default implementation for a TensorFlow API. +The decorated function (known as the "dispatch target") will override the +default implementation for the API when the API is called with parameters that +match a specified type signature. + +### Dispatch Support + +By default, dispatch support is added to the generated op wrappers for any +visible ops by default. 
APIs/ops that are implemented in Python can opt in to +dispatch support using the `add_dispatch_support` decorator. +""" + +import collections +import itertools +import typing # pylint: disable=unused-import (used in doctests) + +from tensorflow.python.framework import _pywrap_python_api_dispatcher as _api_dispatcher +from tensorflow.python.framework import ops +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_export as tf_export_lib +from tensorflow.python.util import tf_inspect +from tensorflow.python.util import traceback_utils +from tensorflow.python.util import type_annotations +from tensorflow.python.util.tf_export import tf_export + + +# Private function attributes used to store dispatchers on TensorFlow APIs. +FALLBACK_DISPATCH_ATTR = "_tf_fallback_dispatchers" +TYPE_BASED_DISPATCH_ATTR = "_tf_type_based_dispatcher" + +# OpDispatchers which should be used for all operations. +_GLOBAL_DISPATCHERS = [] + + +################################################################################ +# Fallback Dispatch +################################################################################ + + +@tf_export("__internal__.dispatch.OpDispatcher", v1=[]) +class OpDispatcher(object): + """Abstract base class for TensorFlow operator dispatchers. + + Each operation dispatcher acts as an override handler for a single + TensorFlow operation, and its results are used when the handler indicates + that it can handle the operation's arguments (by returning any value other + than `OpDispatcher.NOT_SUPPORTED`). + """ + + # Sentinel value that can be returned to indicate that an operation + # dispatcher does not support a given set of arguments. + NOT_SUPPORTED = object() + + def handle(self, args, kwargs): # pylint: disable=unused-argument + """Handle this dispatcher's operation with the specified arguments. 
+ + If this operation dispatcher can handle the given arguments, then + return an appropriate value (or raise an appropriate exception). + + Args: + args: The arguments to the operation. + kwargs: They keyword arguments to the operation. + + Returns: + The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this + dispatcher can not handle the given arguments. + """ + return self.NOT_SUPPORTED + + def register(self, op): + """Register this dispatcher as a handler for `op`. + + Args: + op: Python function: the TensorFlow operation that should be handled. Must + have a dispatch list (which is added automatically for generated ops, + and can be added to Python ops using the `add_dispatch_support` + decorator). + """ + if not hasattr(op, FALLBACK_DISPATCH_ATTR): + raise AssertionError("Dispatching not enabled for %s" % op) + getattr(op, FALLBACK_DISPATCH_ATTR).append(self) + + +@tf_export("__internal__.dispatch.GlobalOpDispatcher", v1=[]) +class GlobalOpDispatcher(object): + """Abstract base class for TensorFlow global operator dispatchers.""" + + NOT_SUPPORTED = OpDispatcher.NOT_SUPPORTED + + def handle(self, op, args, kwargs): + """Handle the specified operation with the specified arguments.""" + + def register(self): + """Register this dispatcher as a handler for all ops.""" + _GLOBAL_DISPATCHERS.append(self) + + +def dispatch(op, args, kwargs): + """Returns the result from the first successful dispatcher for a given op. + + Calls the `handle` method of each `OpDispatcher` that has been registered + to handle `op`, and returns the value from the first successful handler. + + Args: + op: Python function: the operation to dispatch for. + args: The arguments to the operation. + kwargs: They keyword arguments to the operation. + + Returns: + The result of the operation, or `NOT_SUPPORTED` if no registered + dispatcher can handle the given arguments. 
+ """ + for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR): + result = dispatcher.handle(args, kwargs) + if result is not OpDispatcher.NOT_SUPPORTED: + return result + for dispatcher in _GLOBAL_DISPATCHERS: + result = dispatcher.handle(op, args, kwargs) + if result is not OpDispatcher.NOT_SUPPORTED: + return result + return OpDispatcher.NOT_SUPPORTED + + +class _TypeBasedDispatcher(OpDispatcher): + """Dispatcher that handles op if any arguments have a specified type. + + Checks the types of the arguments and keyword arguments (including elements + of lists or tuples), and if any argument values have the indicated type(s), + then delegates to an override function. + """ + + def __init__(self, override_func, types): + self._types = types + self._override_func = override_func + + def _handles(self, args, kwargs): + for arg in itertools.chain(args, kwargs.values()): + if (isinstance(arg, self._types) or + (isinstance(arg, (list, tuple)) and + any(isinstance(elt, self._types) for elt in arg))): + return True + return False + + def handle(self, args, kwargs): + if self._handles(args, kwargs): + return self._override_func(*args, **kwargs) + else: + return self.NOT_SUPPORTED + + +def _remove_annotation(sig): + """Removes annotation from a python Signature.""" + parameters = [p.replace(annotation=p.empty) for p in sig.parameters.values()] + return sig.replace(parameters=parameters, return_annotation=sig.empty) + + +def _get_required_param_names(sig): + """Returns a list of required parameter names from a python Signature.""" + params = [] + for p in sig.parameters.values(): + if p.kind == p.VAR_POSITIONAL: + continue + if p.kind == p.VAR_KEYWORD: + continue + if p.default is not p.empty: + continue + params.append(p.name) + return params + + +def get_compatible_func(op, func): + """Returns a compatible function. + + Args: + op: a callable with whose signature the returned function is compatible. + func: a callable which is called by the returned function. 
+ + Returns: + a compatible function, which conducts the actions of `func` but can + be called like `op`, given that: + - the list of required arguments in `func` and `op` are the same. + - there is no override of the default arguments of `op` that are not + supported by `func`. + """ + op_signature = _remove_annotation(tf_inspect.signature(op)) + func_signature = _remove_annotation(tf_inspect.signature(func)) + + # Identitical signatures, no need to apply compatibility fixes. + if op_signature == func_signature: + return func + + # When calling func: + # - Positional args without default must be in the same order. + # - Ignore missing optional arguments from op + + op_pos_names = _get_required_param_names(op_signature) + func_pos_names = _get_required_param_names(func_signature) + + if op_pos_names != func_pos_names: + raise AssertionError( + "The decorated function's non-default arguments must be identical" + " to that of the overridden op." + f" func has {func_pos_names}. op has {op_pos_names}." + ) + + func_missing_params = {} + + for name in set(op_signature.parameters.keys()) - set( + func_signature.parameters.keys() + ): + p = op_signature.parameters[name] + if p.default is p.empty: + raise AssertionError( + "The decorated function's signature must implement all of the" + f" non-default arguments of the overridden op. Argument `{name}` is" + " unimplemented." 
+ ) + func_missing_params[name] = p + + def compatible_func(*args, **kwargs): + bound = op_signature.bind(*args, **kwargs) + for name, param in func_missing_params.items(): + if name not in bound.arguments: + continue + value = bound.arguments.pop(name) + if value is not param.default: + raise AssertionError( + f"Dispatched op is called with argument `{name}` set to a" + " non-default value, which is not supported by the decorated" + " function" + ) + return func(*bound.args, **bound.kwargs) + + return compatible_func + + +# pylint: disable=g-doc-return-or-yield +def dispatch_for_types(op, *types): + """Decorator to declare that a Python function overrides an op for a type. + + The decorated function is used to override `op` if any of the arguments or + keyword arguments (including elements of lists or tuples) have one of the + specified types. + + Example: + + ```python + @dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue) + def ragged_add(x, y, name=None): ... + ``` + + Args: + op: Python function: the operation that should be overridden. + *types: The argument types for which this function should be used. + """ + + def decorator(func): + + _TypeBasedDispatcher(get_compatible_func(op, func), types).register(op) + return func + + return decorator + + +# pylint: enable=g-doc-return-or-yield + + +def add_fallback_dispatch_list(target): + """Decorator that adds a dispatch_list attribute to an op.""" + if hasattr(target, FALLBACK_DISPATCH_ATTR): + raise AssertionError("%s already has a dispatch list" % target) + setattr(target, FALLBACK_DISPATCH_ATTR, []) + return target + + +# Alias for backwards-compatibility. 
+add_dispatch_list = add_fallback_dispatch_list + + +################################################################################ +# Type-based Dispatch +################################################################################ + + +@tf_export("experimental.dispatch_for_api") +def dispatch_for_api(api, *signatures): + """Decorator that overrides the default implementation for a TensorFlow API. + + The decorated function (known as the "dispatch target") will override the + default implementation for the API when the API is called with parameters that + match a specified type signature. Signatures are specified using dictionaries + that map parameter names to type annotations. E.g., in the following example, + `masked_add` will be called for `tf.add` if both `x` and `y` are + `MaskedTensor`s: + + >>> class MaskedTensor(tf.experimental.ExtensionType): + ... values: tf.Tensor + ... mask: tf.Tensor + + >>> @dispatch_for_api(tf.math.add, {'x': MaskedTensor, 'y': MaskedTensor}) + ... def masked_add(x, y, name=None): + ... return MaskedTensor(x.values + y.values, x.mask & y.mask) + + >>> mt = tf.add(MaskedTensor([1, 2], [True, False]), MaskedTensor(10, True)) + >>> print(f"values={mt.values.numpy()}, mask={mt.mask.numpy()}") + values=[11 12], mask=[ True False] + + If multiple type signatures are specified, then the dispatch target will be + called if any of the signatures match. For example, the following code + registers `masked_add` to be called if `x` is a `MaskedTensor` *or* `y` is + a `MaskedTensor`. + + >>> @dispatch_for_api(tf.math.add, {'x': MaskedTensor}, {'y':MaskedTensor}) + ... def masked_add(x, y): + ... x_values = x.values if isinstance(x, MaskedTensor) else x + ... x_mask = x.mask if isinstance(x, MaskedTensor) else True + ... y_values = y.values if isinstance(y, MaskedTensor) else y + ... y_mask = y.mask if isinstance(y, MaskedTensor) else True + ... 
return MaskedTensor(x_values + y_values, x_mask & y_mask) + + The type annotations in type signatures may be type objects (e.g., + `MaskedTensor`), `typing.List` values, or `typing.Union` values. For + example, the following will register `masked_concat` to be called if `values` + is a list of `MaskedTensor` values: + + >>> @dispatch_for_api(tf.concat, {'values': typing.List[MaskedTensor]}) + ... def masked_concat(values, axis): + ... return MaskedTensor(tf.concat([v.values for v in values], axis), + ... tf.concat([v.mask for v in values], axis)) + + Each type signature must contain at least one subclass of `tf.CompositeTensor` + (which includes subclasses of `tf.ExtensionType`), and dispatch will only be + triggered if at least one type-annotated parameter contains a + `CompositeTensor` value. This rule avoids invoking dispatch in degenerate + cases, such as the following examples: + + * `@dispatch_for_api(tf.concat, {'values': List[MaskedTensor]})`: Will not + dispatch to the decorated dispatch target when the user calls + `tf.concat([])`. + + * `@dispatch_for_api(tf.add, {'x': Union[MaskedTensor, Tensor], 'y': + Union[MaskedTensor, Tensor]})`: Will not dispatch to the decorated dispatch + target when the user calls `tf.add(tf.constant(1), tf.constant(2))`. + + The dispatch target's signature must match the signature of the API that is + being overridden. In particular, parameters must have the same names, and + must occur in the same order. The dispatch target may optionally elide the + "name" parameter, in which case it will be wrapped with a call to + `tf.name_scope` when appropraite. + + Args: + api: The TensorFlow API to override. + *signatures: Dictionaries mapping parameter names or indices to type + annotations, specifying when the dispatch target should be called. 
In + particular, the dispatch target will be called if any signature matches; + and a signature matches if all of the specified parameters have types that + match with the indicated type annotations. If no signatures are + specified, then a signature will be read from the dispatch target + function's type annotations. + + Returns: + A decorator that overrides the default implementation for `api`. + + #### Registered APIs + + The TensorFlow APIs that may be overridden by `@dispatch_for_api` are: + + <> + """ + dispatcher = getattr(api, TYPE_BASED_DISPATCH_ATTR, None) + if dispatcher is None: + raise ValueError(f"{api} does not support dispatch.") + + api_signature = tf_inspect.signature(api) + signature_checkers = [ + _make_signature_checker(api_signature, signature) + for signature in signatures + ] + + def decorator(dispatch_target): + """Decorator that registers the given dispatch target.""" + if not callable(dispatch_target): + raise TypeError("Expected dispatch_target to be callable; " + f"got {dispatch_target!r}") + dispatch_target = _add_name_scope_wrapper(dispatch_target, api_signature) + _check_signature(api_signature, dispatch_target) + + for signature_checker in signature_checkers: + dispatcher.Register(signature_checker, dispatch_target) + _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].extend(signatures) + + if not signature_checkers: + signature = _signature_from_annotations(dispatch_target) + checker = _make_signature_checker(api_signature, signature) + dispatcher.Register(checker, dispatch_target) + _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].append(signature) + + return dispatch_target + + return decorator + + +# Nested dict mapping `api_func` -> `dispatch_target` -> `List[signature]`, +# which can be used for documentation generation and for improved error messages +# when APIs are called with unsupported types. 
_TYPE_BASED_DISPATCH_SIGNATURES = {}


def apis_with_type_based_dispatch():
  """Returns a list of TensorFlow APIs that support type-based dispatch."""

  def _qualified_name(api):
    return f"{api.__module__}.{api.__name__}"

  return sorted(_TYPE_BASED_DISPATCH_SIGNATURES, key=_qualified_name)


def type_based_dispatch_signatures_for(cls):
  """Returns dispatch signatures that have been registered for a given class.

  This function is intended for documentation-generation purposes.

  Args:
    cls: The class to search for. Type signatures are searched recursively, so
      e.g., if `cls=RaggedTensor`, then information will be returned for all
      dispatch targets that have `RaggedTensor` anywhere in their type
      annotations (including nested in `typing.Union` or `typing.List`.)

  Returns:
    A `dict` mapping `api` -> `signatures`, where `api` is a TensorFlow API
    function; and `signatures` is a list of dispatch signatures for `api`
    that include `cls`. (Each signature is a dict mapping argument names to
    type annotations; see `dispatch_for_api` for more info.)
  """

  def mentions_cls(annotation):
    """Returns True if `annotation` refers to `cls` (possibly nested)."""
    if isinstance(annotation, dict):
      return any(mentions_cls(v) for v in annotation.values())
    if annotation is cls:
      return True
    if (type_annotations.is_generic_list(annotation) or
        type_annotations.is_generic_union(annotation)):
      return any(
          mentions_cls(arg)
          for arg in type_annotations.get_generic_type_args(annotation))
    return False

  result = {}
  for api, targets in _TYPE_BASED_DISPATCH_SIGNATURES.items():
    for signatures in targets.values():
      matching = [sig for sig in signatures if mentions_cls(sig)]
      if matching:
        result.setdefault(api, []).extend(matching)
  return result


# TODO(edloper): Consider using a mechanism like this to automatically add
# the `name` argument to all TensorFlow APIs that are implemented in Python
# (so each Python function doesn't need to do it manually).
def _add_name_scope_wrapper(func, api_signature):
  """Wraps `func` to expect a "name" arg, and use it to call `ops.name_scope`.

  If `func` already expects a "name" arg, or if `api_signature` does not
  expect a "name" arg, then returns `func` as-is.

  Args:
    func: The function to wrap. Signature must match `api_signature` (except
      the "name" parameter may be missing).
    api_signature: The signature of the original API (used to find the index for
      the "name" parameter).

  Returns:
    The wrapped function (or the original function if no wrapping is needed).
  """
  if "name" not in api_signature.parameters:
    return func  # no wrapping needed (API has no name parameter).

  func_signature = tf_inspect.signature(func)
  func_argspec = tf_inspect.getargspec(func)
  # If `func` already accepts `name` (directly or via **kwargs), leave it
  # alone; wrapping here would shadow the function's own handling of the arg.
  if "name" in func_signature.parameters or func_argspec.keywords is not None:
    return func  # No wrapping needed (already has name parameter).

  name_index = list(api_signature.parameters).index("name")

  def wrapped_func(*args, **kwargs):
    # `name` may arrive positionally (at the API's name index) or by keyword.
    if name_index < len(args):
      name = args[name_index]
      args = args[:name_index] + args[name_index + 1:]
    else:
      name = kwargs.pop("name", None)
    if name is None:
      return func(*args, **kwargs)
    else:
      with ops.name_scope(name):
        return func(*args, **kwargs)

  wrapped_func = tf_decorator.make_decorator(func, wrapped_func)
  # Advertise a signature that includes the "name" parameter, so the wrapper
  # is indistinguishable from an API that handles `name` natively.
  wrapped_func.__signature__ = func_signature.replace(
      parameters=(list(func_signature.parameters.values()) +
                  [api_signature.parameters["name"]]))
  del wrapped_func._tf_decorator
  return wrapped_func


@tf_export("experimental.unregister_dispatch_for")
def unregister_dispatch_for(dispatch_target):
  """Unregisters a function that was registered with `@dispatch_for_*`.

  This is primarily intended for testing purposes.

  Example:

  >>> # Define a type and register a dispatcher to override `tf.abs`:
  >>> class MyTensor(tf.experimental.ExtensionType):
  ...   value: tf.Tensor
  >>> @tf.experimental.dispatch_for_api(tf.abs)
  ... def my_abs(x: MyTensor):
  ...   return MyTensor(tf.abs(x.value))
  >>> tf.abs(MyTensor(5))
  MyTensor(value=)

  >>> # Unregister the dispatcher, so `tf.abs` no longer calls `my_abs`.
  >>> unregister_dispatch_for(my_abs)
  >>> tf.abs(MyTensor(5))
  Traceback (most recent call last):
  ...
  ValueError: Attempt to convert a value ... to a Tensor.

  Args:
    dispatch_target: The function to unregister.

  Raises:
    ValueError: If `dispatch_target` was not registered using `@dispatch_for`,
      `@dispatch_for_unary_elementwise_apis`, or
      `@dispatch_for_binary_elementwise_apis`.
  """
  found = False

  # Check if dispatch_target registered by `@dispatch_for_api`
  for api, signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():
    if dispatch_target in signatures:
      dispatcher = getattr(api, TYPE_BASED_DISPATCH_ATTR)
      dispatcher.Unregister(dispatch_target)
      del signatures[dispatch_target]
      found = True

  # Check if dispatch_target registered by `@dispatch_for_*_elementwise_apis`.
  # Elementwise handlers fan out to one dispatch target per API, so each of
  # those per-API targets is unregistered recursively.
  elementwise_keys_to_delete = [
      key for (key, handler) in _ELEMENTWISE_API_HANDLERS.items()
      if handler is dispatch_target
  ]
  for key in set(elementwise_keys_to_delete):
    for _, target in _ELEMENTWISE_API_TARGETS[key]:
      unregister_dispatch_for(target)
    del _ELEMENTWISE_API_HANDLERS[key]
    del _ELEMENTWISE_API_TARGETS[key]
    found = True

  if not found:
    raise ValueError(f"Function {dispatch_target} was not registered using "
                     "a `@dispatch_for_*` decorator.")


def register_dispatchable_type(cls):
  """Class decorator that registers a type for use with type-based dispatch.

  Should *not* be used with subclasses of `CompositeTensor` or `ExtensionType`
  (which are automatically registered).

  Note: this function is intended to support internal legacy use cases (such
  as RaggedTensorValue), and will probably not be exposed as a public API.

  Args:
    cls: The class to register.

  Returns:
    `cls`.
  """
  _api_dispatcher.register_dispatchable_type(cls)
  return cls


def add_type_based_api_dispatcher(target):
  """Adds a PythonAPIDispatcher to the given TensorFlow API function."""
  if hasattr(target, TYPE_BASED_DISPATCH_ATTR):
    raise ValueError(f"{target} already has a type-based API dispatcher.")

  _, unwrapped = tf_decorator.unwrap(target)
  target_argspec = tf_inspect.getargspec(unwrapped)
  if target_argspec.varargs or target_argspec.keywords:
    # @TODO(b/194903203) Add v2 dispatch support for APIs that take varargs
    # and keywords. Examples of APIs that take varargs and kwargs: meshgrid,
    # einsum, map_values, map_flat_values.
    return target

  setattr(
      target, TYPE_BASED_DISPATCH_ATTR,
      _api_dispatcher.PythonAPIDispatcher(unwrapped.__name__,
                                          target_argspec.args,
                                          target_argspec.defaults))
  _TYPE_BASED_DISPATCH_SIGNATURES[target] = collections.defaultdict(list)
  return target


def _check_signature(api_signature, func):
  """Checks that a dispatch target's signature is compatible with an API.

  Args:
    api_signature: The signature of the TensorFlow API.
    func: The dispatch target.

  Raises:
    ValueError: if the signatures are incompatible. Two signatures are
      considered compatible if they have the same number of parameters, and all
      corresponding parameters have the same `name` and `kind`. (Parameters
      are not required to have the same default value or the same annotation.)
  """
  # Special case: if func_signature is (*args, **kwargs), then assume it's ok.
  func_argspec = tf_inspect.getargspec(func)
  if (func_argspec.varargs is not None and func_argspec.keywords is not None
      and not func_argspec.args):
    return

  func_signature = tf_inspect.signature(func)
  ok = len(api_signature.parameters) == len(func_signature.parameters)
  if ok:
    for param_1, param_2 in zip(api_signature.parameters.values(),
                                func_signature.parameters.values()):
      if (param_1.name != param_2.name) or (param_1.kind != param_2.kind):
        ok = False
  if not ok:
    raise ValueError(f"Dispatch function's signature {func_signature} does "
                     f"not match API's signature {api_signature}.")


def _make_signature_checker(api_signature, signature):
  """Builds a PySignatureChecker for the given type signature.

  Args:
    api_signature: The `inspect.Signature` of the API whose signature is
      being checked.
    signature: Dictionary mapping parameter names to type annotations.

  Returns:
    A `PySignatureChecker`.
  """
  if not (isinstance(signature, dict) and
          all(isinstance(k, (str, int)) for k in signature)):
    raise TypeError("signatures must be dictionaries mapping parameter names "
                    "to type annotations.")
  checkers = []

  param_names = list(api_signature.parameters)
  for param_name, param_type in signature.items():
    # Convert positional parameters to named parameters.
    if (isinstance(param_name, int) and
        param_name < len(api_signature.parameters)):
      param_name = list(api_signature.parameters.values())[param_name].name

    # Check that the parameter exists, and has an appropriate kind.
    param = api_signature.parameters.get(param_name, None)
    if param is None:
      raise ValueError("signature includes annotation for unknown "
                       f"parameter {param_name!r}.")
    if param.kind not in (tf_inspect.Parameter.POSITIONAL_ONLY,
                          tf_inspect.Parameter.POSITIONAL_OR_KEYWORD):
      raise ValueError("Dispatch currently only supports type annotations "
                       "for positional parameters; can't handle annotation "
                       f"for {param.kind!r} parameter {param_name}.")

    checker = make_type_checker(param_type)
    index = param_names.index(param_name)
    checkers.append((index, checker))

  return _api_dispatcher.PySignatureChecker(checkers)


# Cache for InstanceTypeChecker objects (we only want to create one
# InstanceTypeChecker for each type, since each one uses an internal cache
# to avoid repeated calls back into Python's isinstance).
_is_instance_checker_cache = {}


def make_type_checker(annotation):
  """Builds a PyTypeChecker for the given type annotation."""
  if type_annotations.is_generic_union(annotation):
    type_args = type_annotations.get_generic_type_args(annotation)

    # If the union contains two or more simple types, then use a single
    # InstanceChecker to check them.
    simple_types = [t for t in type_args if isinstance(t, type)]
    simple_types = tuple(sorted(simple_types, key=id))
    if len(simple_types) > 1:
      if simple_types not in _is_instance_checker_cache:
        checker = _api_dispatcher.MakeInstanceChecker(*simple_types)
        _is_instance_checker_cache[simple_types] = checker
      options = ([_is_instance_checker_cache[simple_types]] +
                 [make_type_checker(t) for t in type_args
                  if not isinstance(t, type)])
      return _api_dispatcher.MakeUnionChecker(options)

    options = [make_type_checker(t) for t in type_args]
    return _api_dispatcher.MakeUnionChecker(options)

  elif type_annotations.is_generic_list(annotation):
    type_args = type_annotations.get_generic_type_args(annotation)
    if len(type_args) != 1:
      raise AssertionError("Expected List[...] to have a single type parameter")
    elt_type = make_type_checker(type_args[0])
    return _api_dispatcher.MakeListChecker(elt_type)

  elif isinstance(annotation, type):
    if annotation not in _is_instance_checker_cache:
      checker = _api_dispatcher.MakeInstanceChecker(annotation)
      _is_instance_checker_cache[annotation] = checker
    return _is_instance_checker_cache[annotation]

  elif annotation is None:
    # A bare `None` annotation is treated as `type(None)` (NoneType).
    return make_type_checker(type(None))

  else:
    raise ValueError(f"Type annotation {annotation} is not currently supported"
                     " by dispatch. Supported annotations: type objects, "
                     " List[...], and Union[...]")


def _signature_from_annotations(func):
  """Builds a dict mapping from parameter names to type annotations."""
  func_signature = tf_inspect.signature(func)

  signature = dict([(name, param.annotation)
                    for (name, param) in func_signature.parameters.items()
                    if param.annotation != tf_inspect.Parameter.empty])
  if not signature:
    raise ValueError("The dispatch_for_api decorator must be called with at "
                     "least one signature, or applied to a function that "
                     "has type annotations on its parameters.")
  return signature


# Registries for elementwise APIs and API handlers.
#
# _*_ELEMENTWISE_APIS: A list of TensorFlow APIs that have been registered
# as elementwise operations using the `register_*_elementwise_api`
# decorators.
#
# _ELEMENTWISE_API_HANDLERS: Dicts mapping from argument type(s) to API
# handlers that have been registered with the `dispatch_for_*_elementwise_apis`
# decorators.
#
# _ELEMENTWISE_API_TARGETS: Dict mapping from argument type(s) to lists of
# `(api, dispatch_target)` pairs. Used to implement
# `unregister_elementwise_api_handler`.
_UNARY_ELEMENTWISE_APIS = []
_BINARY_ELEMENTWISE_APIS = []
_BINARY_ELEMENTWISE_ASSERT_APIS = []
_ELEMENTWISE_API_HANDLERS = {}
_ELEMENTWISE_API_TARGETS = {}

_ASSERT_API_TAG = "ASSERT_API_TAG"


@tf_export("experimental.dispatch_for_unary_elementwise_apis")
def dispatch_for_unary_elementwise_apis(x_type):
  """Decorator to override default implementation for unary elementwise APIs.

  The decorated function (known as the "elementwise api handler") overrides
  the default implementation for any unary elementwise API whenever the value
  for the first argument (typically named `x`) matches the type annotation
  `x_type`. The elementwise api handler is called with two arguments:

    `elementwise_api_handler(api_func, x)`

  Where `api_func` is a function that takes a single parameter and performs the
  elementwise operation (e.g., `tf.abs`), and `x` is the first argument to the
  elementwise api.

  The following example shows how this decorator can be used to update all
  unary elementwise operations to handle a `MaskedTensor` type:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_unary_elementwise_apis(MaskedTensor)
  ... def unary_elementwise_api_handler(api_func, x):
  ...   return MaskedTensor(api_func(x.values), x.mask)
  >>> mt = MaskedTensor([1, -2, -3], [True, False, True])
  >>> abs_mt = tf.abs(mt)
  >>> print(f"values={abs_mt.values.numpy()}, mask={abs_mt.mask.numpy()}")
  values=[1 2 3], mask=[ True False True]

  For unary elementwise operations that take extra arguments beyond `x`, those
  arguments are *not* passed to the elementwise api handler, but are
  automatically added when `api_func` is called. E.g., in the following
  example, the `dtype` parameter is not passed to
  `unary_elementwise_api_handler`, but is added by `api_func`.

  >>> ones_mt = tf.ones_like(mt, dtype=tf.float32)
  >>> print(f"values={ones_mt.values.numpy()}, mask={ones_mt.mask.numpy()}")
  values=[1.0 1.0 1.0], mask=[ True False True]

  Args:
    x_type: A type annotation indicating when the api handler should be called.
      See `dispatch_for_api` for a list of supported annotation types.

  Returns:
    A decorator.

  #### Registered APIs

  The unary elementwise APIs are:

  <>
  """

  def decorator(handler):
    # Only one handler may be registered per x_type (unary handlers are keyed
    # by a 1-tuple; binary handlers use 2-tuples, assert handlers 3-tuples).
    if (x_type,) in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A unary elementwise dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[(x_type,)]}) "
                       f"has already been registered for {x_type}.")
    _ELEMENTWISE_API_HANDLERS[(x_type,)] = handler
    for api in _UNARY_ELEMENTWISE_APIS:
      _add_dispatch_for_unary_elementwise_api(api, x_type, handler)

    return handler

  return decorator


@tf_export("experimental.dispatch_for_binary_elementwise_apis")
def dispatch_for_binary_elementwise_apis(x_type, y_type):
  """Decorator to override default implementation for binary elementwise APIs.

  The decorated function (known as the "elementwise api handler") overrides
  the default implementation for any binary elementwise API whenever the value
  for the first two arguments (typically named `x` and `y`) match the specified
  type annotations. The elementwise api handler is called with two arguments:

    `elementwise_api_handler(api_func, x, y)`

  Where `x` and `y` are the first two arguments to the elementwise api, and
  `api_func` is a TensorFlow function that takes two parameters and performs the
  elementwise operation (e.g., `tf.add`).

  The following example shows how this decorator can be used to update all
  binary elementwise operations to handle a `MaskedTensor` type:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)
  ... def binary_elementwise_api_handler(api_func, x, y):
  ...   return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)
  >>> a = MaskedTensor([1, 2, 3, 4, 5], [True, True, True, True, False])
  >>> b = MaskedTensor([2, 4, 6, 8, 0], [True, True, True, False, True])
  >>> c = tf.add(a, b)
  >>> print(f"values={c.values.numpy()}, mask={c.mask.numpy()}")
  values=[ 3 6 9 12 5], mask=[ True True True False False]

  Args:
    x_type: A type annotation indicating when the api handler should be called.
    y_type: A type annotation indicating when the api handler should be called.

  Returns:
    A decorator.

  #### Registered APIs

  The binary elementwise APIs are:

  <>
  """

  def decorator(handler):
    if (x_type, y_type) in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A binary elementwise dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[x_type, y_type]}) "
                       f"has already been registered for ({x_type}, {y_type}).")
    _ELEMENTWISE_API_HANDLERS[x_type, y_type] = handler
    for api in _BINARY_ELEMENTWISE_APIS:
      _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)

    return handler

  return decorator


@tf_export("experimental.dispatch_for_binary_elementwise_assert_apis")
def dispatch_for_binary_elementwise_assert_apis(x_type, y_type):
  """Decorator to override default implementation for binary elementwise assert APIs.

  The decorated function (known as the "elementwise assert handler")
  overrides the default implementation for any binary elementwise assert API
  whenever the value for the first two arguments (typically named `x` and `y`)
  match the specified type annotations. The handler is called with two
  arguments:

    `elementwise_assert_handler(assert_func, x, y)`

  Where `x` and `y` are the first two arguments to the binary elementwise assert
  operation, and `assert_func` is a TensorFlow function that takes two
  parameters and performs the elementwise assert operation (e.g.,
  `tf.debugging.assert_equal`).

  The following example shows how this decorator can be used to update all
  binary elementwise assert operations to handle a `MaskedTensor` type:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_binary_elementwise_assert_apis(MaskedTensor, MaskedTensor)
  ... def binary_elementwise_assert_api_handler(assert_func, x, y):
  ...   merged_mask = tf.logical_and(x.mask, y.mask)
  ...   selected_x_values = tf.boolean_mask(x.values, merged_mask)
  ...   selected_y_values = tf.boolean_mask(y.values, merged_mask)
  ...   assert_func(selected_x_values, selected_y_values)
  >>> a = MaskedTensor([1, 1, 0, 1, 1], [False, False, True, True, True])
  >>> b = MaskedTensor([2, 2, 0, 2, 2], [True, True, True, False, False])
  >>> tf.debugging.assert_equal(a, b) # assert passed; no exception was thrown

  >>> a = MaskedTensor([1, 1, 1, 1, 1], [True, True, True, True, True])
  >>> b = MaskedTensor([0, 0, 0, 0, 2], [True, True, True, True, True])
  >>> tf.debugging.assert_greater(a, b)
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Condition x > y did not hold.

  Args:
    x_type: A type annotation indicating when the api handler should be called.
    y_type: A type annotation indicating when the api handler should be called.

  Returns:
    A decorator.

  #### Registered APIs

  The binary elementwise assert APIs are:

  <>
  """

  def decorator(handler):
    # Assert handlers share the handler registry with unary/binary handlers;
    # the _ASSERT_API_TAG element keeps their keys from colliding.
    api_handler_key = (x_type, y_type, _ASSERT_API_TAG)
    if api_handler_key in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A binary elementwise assert dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[api_handler_key]}) "
                       f"has already been registered for ({x_type}, {y_type}).")
    _ELEMENTWISE_API_HANDLERS[api_handler_key] = handler
    for api in _BINARY_ELEMENTWISE_ASSERT_APIS:
      _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)

    return handler

  return decorator


def register_unary_elementwise_api(func):
  """Decorator that registers a TensorFlow op as a unary elementwise API."""
  _UNARY_ELEMENTWISE_APIS.append(func)
  # Retroactively apply any already-registered unary handlers to this API.
  for args, handler in _ELEMENTWISE_API_HANDLERS.items():
    if len(args) == 1:
      _add_dispatch_for_unary_elementwise_api(func, args[0], handler)
  return func


def register_binary_elementwise_api(func):
  """Decorator that registers a TensorFlow op as a binary elementwise API."""
  _BINARY_ELEMENTWISE_APIS.append(func)
  for args, handler in _ELEMENTWISE_API_HANDLERS.items():
    if len(args) == 2:
      _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)
  return func


def register_binary_elementwise_assert_api(func):
  """Decorator that registers a TensorFlow op as a binary elementwise assert API.

  Different from `dispatch_for_binary_elementwise_apis`, this decorator is used
  for assert apis, such as assert_equal, assert_none_equal, etc, which return
  None in eager mode and an op in graph mode.

  Args:
    func: The function that implements the binary elementwise assert API.

  Returns:
    `func`
  """
  _BINARY_ELEMENTWISE_ASSERT_APIS.append(func)
  for args, handler in _ELEMENTWISE_API_HANDLERS.items():
    if len(args) == 3 and args[2] is _ASSERT_API_TAG:
      _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)
  return func


def unary_elementwise_apis():
  """Returns a list of APIs that have been registered as unary elementwise."""
  return tuple(_UNARY_ELEMENTWISE_APIS)


def binary_elementwise_apis():
  """Returns a list of APIs that have been registered as binary elementwise."""
  return tuple(_BINARY_ELEMENTWISE_APIS)


def _add_dispatch_for_unary_elementwise_api(api, x_type,
                                            elementwise_api_handler):
  """Registers a unary elementwise handler as a dispatcher for a given API."""
  api_signature = tf_inspect.signature(api)
  x_name = list(api_signature.parameters)[0]
  name_index = _find_name_index(api_signature)

  # If the API takes any parameters besides `x` and `name`, they must be
  # bound into `api_func` before it is handed to the handler.
  need_to_bind_api_args = (
      len(api_signature.parameters) > 2 or
      "name" not in api_signature.parameters)

  @dispatch_for_api(api, {x_name: x_type})
  def dispatch_target(*args, **kwargs):
    args, kwargs, name = _extract_name_arg(args, kwargs, name_index)
    if args:
      x, args = args[0], args[1:]
    else:
      x = kwargs.pop(x_name)

    if need_to_bind_api_args:
      tensor_api = lambda v: api(v, *args, **kwargs)
    else:
      tensor_api = api

    if name is None:
      return elementwise_api_handler(tensor_api, x)
    else:
      with ops.name_scope(name, None, [x]):
        return elementwise_api_handler(tensor_api, x)

  dispatch_target.__name__ = "elementwise_dispatch_target_for_" + api.__name__
  dispatch_target.__qualname__ = dispatch_target.__name__
  # Keep track of what targets we've registered (so we can unregister them).
  target_list = _ELEMENTWISE_API_TARGETS.setdefault((x_type,), [])
  target_list.append((api, dispatch_target))


def _add_dispatch_for_binary_elementwise_api(api, x_type, y_type,
                                             elementwise_api_handler):
  """Registers a binary elementwise handler as a dispatcher for a given API."""
  api_signature = tf_inspect.signature(api)
  x_name, y_name = list(api_signature.parameters)[:2]
  name_index = _find_name_index(api_signature)

  need_to_bind_api_args = (len(api_signature.parameters) > 3 or
                           "name" not in api_signature.parameters)

  @dispatch_for_api(api, {x_name: x_type, y_name: y_type})
  def dispatch_target(*args, **kwargs):
    args, kwargs, name = _extract_name_arg(args, kwargs, name_index)
    # `x` and `y` may each arrive positionally or by keyword.
    if len(args) > 1:
      x, y, args = args[0], args[1], args[2:]
    elif args:
      x, args = args[0], args[1:]
      y = kwargs.pop(y_name, None)
    else:
      x = kwargs.pop(x_name, None)
      y = kwargs.pop(y_name, None)

    if need_to_bind_api_args:
      tensor_api = lambda v1, v2: api(v1, v2, *args, **kwargs)
    else:
      tensor_api = api

    if name is None:
      return elementwise_api_handler(tensor_api, x, y)
    else:
      with ops.name_scope(name, None, [x, y]):
        return elementwise_api_handler(tensor_api, x, y)

  dispatch_target.__name__ = "elementwise_dispatch_target_for_" + api.__name__
  dispatch_target.__qualname__ = dispatch_target.__name__
  # Keep track of what targets we've registered (so we can unregister them).
  target_list = _ELEMENTWISE_API_TARGETS.setdefault((x_type, y_type), [])
  target_list.append((api, dispatch_target))


def _find_name_index(signature):
  """Returns the index of the `name` parameter, or -1 if it's not present."""
  try:
    return list(signature.parameters).index("name")
  except ValueError:
    return -1


def _extract_name_arg(args, kwargs, name_index):
  """Extracts the parameter `name` and returns `(args, kwargs, name_value)`."""
  if name_index < 0:
    name_value = None
  elif name_index < len(args):
    name_value = args[name_index]
    args = args[:name_index] + args[name_index + 1:]
  else:
    name_value = kwargs.pop("name", None)
  return args, kwargs, name_value


def update_docstrings_with_api_lists():
  """Updates the docstrings of dispatch decorators with API lists.

  Updates docstrings for `dispatch_for_api`,
  `dispatch_for_unary_elementwise_apis`, and
  `dispatch_for_binary_elementwise_apis`, by replacing the string '<>'
  with a list of APIs that have been registered for that decorator.
  """
  _update_docstring_with_api_list(dispatch_for_unary_elementwise_apis,
                                  _UNARY_ELEMENTWISE_APIS)
  _update_docstring_with_api_list(dispatch_for_binary_elementwise_apis,
                                  _BINARY_ELEMENTWISE_APIS)
  _update_docstring_with_api_list(dispatch_for_binary_elementwise_assert_apis,
                                  _BINARY_ELEMENTWISE_ASSERT_APIS)
  _update_docstring_with_api_list(dispatch_for_api,
                                  _TYPE_BASED_DISPATCH_SIGNATURES)


def _update_docstring_with_api_list(target, api_list):
  """Replaces `<>` in target.__doc__ with the given list of APIs."""
  lines = []
  for func in api_list:
    name = tf_export_lib.get_canonical_name_for_symbol(
        func, add_prefix_to_v1_names=True)
    # APIs with no canonical exported name are omitted from the doc list.
    if name is not None:
      params = tf_inspect.signature(func).parameters.keys()
      lines.append(f"  * `tf.{name}({', '.join(params)})`")
  lines.sort()
  target.__doc__ = target.__doc__.replace(" <>", "\n".join(lines))


################################################################################
# Dispatch Support
################################################################################
@tf_export("__internal__.dispatch.add_dispatch_support", v1=[])
def add_dispatch_support(target=None, iterable_parameters=None):
  """Decorator that adds a dispatch handling wrapper to a TensorFlow Python API.

  This wrapper adds the decorated function as an API that can be overridden
  using the `@dispatch_for_api` decorator. In the following example, we first
  define a new API (`double`) that supports dispatch, then define a custom type
  (`MaskedTensor`) and finally use `dispatch_for_api` to override the default
  implementation of `double` when called with `MaskedTensor` values:

  >>> @add_dispatch_support
  ... def double(x):
  ...   return x * 2
  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_api(double, {'x': MaskedTensor})
  ... def masked_double(x):
  ...   return MaskedTensor(x.values * 2, x.mask)

  The optional `iterable_parameters` argument can be used to mark parameters
  that can take arbitrary iterable values (such as generator expressions).
  These need to be handled specially during dispatch, since just iterating
  over an iterable uses up its values. In the following example, we define a
  new API whose second argument can be an iterable value; and then override
  the default implementation of that API when the iterable contains
  MaskedTensors:

  >>> @add_dispatch_support(iterable_parameters=['ys'])
  ... def add_tensor_to_list_of_tensors(x, ys):
  ...   return [x + y for y in ys]
  >>> @dispatch_for_api(add_tensor_to_list_of_tensors,
  ...                   {'ys': typing.List[MaskedTensor]})
  ... def masked_add_tensor_to_list_of_tensors(x, ys):
  ...   return [MaskedTensor(x+y.values, y.mask) for y in ys]

  (Note: the only TensorFlow API that currently supports iterables is `add_n`.)

  Args:
    target: The TensorFlow API that should support dispatch.
    iterable_parameters: Optional list of parameter names that may be called
      with iterables (such as the `inputs` parameter for `tf.add_n`).

  Returns:
    A decorator.
  """

  if not (iterable_parameters is None or
          (isinstance(iterable_parameters, (list, tuple)) and
           all(isinstance(p, str) for p in iterable_parameters))):
    raise TypeError("iterable_parameters should be a list or tuple of string.")

  def decorator(dispatch_target):

    # Get the name & index for each iterable parameter.
    if iterable_parameters is None:
      iterable_params = None
    else:
      arg_names = tf_inspect.getargspec(dispatch_target).args
      iterable_params = [
          (name, arg_names.index(name)) for name in iterable_parameters
      ]

    @traceback_utils.filter_traceback
    def op_dispatch_handler(*args, **kwargs):
      """Call `dispatch_target`, performing dispatch when appropriate."""

      # Type-based dispatch system (dispatch v2):
      # NOTE: `api_dispatcher` is a closure over the variable assigned below,
      # after `add_type_based_api_dispatcher` runs; late binding makes this ok.
      if api_dispatcher is not None:
        if iterable_params is not None:
          args, kwargs = replace_iterable_params(args, kwargs, iterable_params)
        result = api_dispatcher.Dispatch(args, kwargs)
        if result is not NotImplemented:
          return result

      # Fallback dispatch system (dispatch v1):
      try:
        return dispatch_target(*args, **kwargs)
      except (TypeError, ValueError):
        # Note: convert_to_eager_tensor currently raises a ValueError, not a
        # TypeError, when given unexpected types. So we need to catch both.
        result = dispatch(op_dispatch_handler, args, kwargs)
        if result is not OpDispatcher.NOT_SUPPORTED:
          return result
        else:
          raise

    add_fallback_dispatch_list(op_dispatch_handler)
    op_dispatch_handler = tf_decorator.make_decorator(dispatch_target,
                                                      op_dispatch_handler)
    add_type_based_api_dispatcher(op_dispatch_handler)
    api_dispatcher = getattr(op_dispatch_handler, TYPE_BASED_DISPATCH_ATTR,
                             None)
    return op_dispatch_handler

  if target is None:
    return decorator
  else:
    return decorator(target)


def replace_iterable_params(args, kwargs, iterable_params):
  """Returns (args, kwargs) with any iterable parameters converted to lists.

  Args:
    args: Positional arguments to a function
    kwargs: Keyword arguments to a function.
    iterable_params: A list of (name, index) tuples for iterable parameters.

  Returns:
    A tuple (args, kwargs), where any positional or keyword parameters in
    `iterable_params` have their value converted to a `list`.
  """
  args = list(args)
  for name, index in iterable_params:
    if index < len(args):
      args[index] = list(args[index])
    elif name in kwargs:
      kwargs[name] = list(kwargs[name])
  return tuple(args), kwargs
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbbc0e66169971ee921cf682adb9c2524fe14f30
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py
@@ -0,0 +1,206 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Extract parse_example op configuration to a proto."""
+
+from tensorflow.core.example import example_parser_configuration_pb2
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+
+
+def extract_example_parser_configuration(parse_example_op, sess):
+  """Returns an ExampleParserConfig proto.
+
+  Args:
+    parse_example_op: A ParseExample or ParseExampleV2 `Operation`
+    sess: A tf.compat.v1.Session needed to obtain some configuration values.
+  Returns:
+    A ExampleParserConfig proto.
+ + Raises: + ValueError: If attributes are inconsistent. + """ + if parse_example_op.type == "ParseExample": + return _extract_from_parse_example(parse_example_op, sess) + elif parse_example_op.type == "ParseExampleV2": + return _extract_from_parse_example_v2(parse_example_op, sess) + else: + raise ValueError( + "Found unexpected type when parsing example. Expected `ParseExample` " + f"object. Received type: {parse_example_op.type}") + + +def _extract_from_parse_example(parse_example_op, sess): + """Extract ExampleParserConfig from ParseExample op.""" + config = example_parser_configuration_pb2.ExampleParserConfiguration() + + num_sparse = parse_example_op.get_attr("Nsparse") + num_dense = parse_example_op.get_attr("Ndense") + total_features = num_dense + num_sparse + + sparse_types = parse_example_op.get_attr("sparse_types") + dense_types = parse_example_op.get_attr("Tdense") + dense_shapes = parse_example_op.get_attr("dense_shapes") + + if len(sparse_types) != num_sparse: + raise ValueError("len(sparse_types) attribute does not match " + "Nsparse attribute (%d vs %d)" % + (len(sparse_types), num_sparse)) + + if len(dense_types) != num_dense: + raise ValueError("len(dense_types) attribute does not match " + "Ndense attribute (%d vs %d)" % + (len(dense_types), num_dense)) + + if len(dense_shapes) != num_dense: + raise ValueError("len(dense_shapes) attribute does not match " + "Ndense attribute (%d vs %d)" % + (len(dense_shapes), num_dense)) + + # Skip over the serialized input, and the names input. + fetch_list = parse_example_op.inputs[2:] + + # Fetch total_features key names and num_dense default values. 
+ if len(fetch_list) != (total_features + num_dense): + raise ValueError("len(fetch_list) does not match total features + " + "num_dense (%d vs %d)" % + (len(fetch_list), (total_features + num_dense))) + + fetched = sess.run(fetch_list) + + if len(fetched) != len(fetch_list): + raise ValueError("len(fetched) does not match len(fetch_list) " + "(%d vs %d)" % (len(fetched), len(fetch_list))) + + # Fetch indices. + sparse_keys_start = 0 + dense_keys_start = sparse_keys_start + num_sparse + dense_def_start = dense_keys_start + num_dense + + # Output tensor indices. + sparse_indices_start = 0 + sparse_values_start = num_sparse + sparse_shapes_start = sparse_values_start + num_sparse + dense_values_start = sparse_shapes_start + num_sparse + + # Dense features. + for i in range(num_dense): + key = fetched[dense_keys_start + i] + feature_config = config.feature_map[key] + # Convert the default value numpy array fetched from the session run + # into a TensorProto. + fixed_config = feature_config.fixed_len_feature + + fixed_config.default_value.CopyFrom( + tensor_util.make_tensor_proto(fetched[dense_def_start + i])) + # Convert the shape from the attributes + # into a TensorShapeProto. + fixed_config.shape.CopyFrom( + tensor_shape.TensorShape(dense_shapes[i]).as_proto()) + + fixed_config.dtype = dense_types[i].as_datatype_enum + # Get the output tensor name. + fixed_config.values_output_tensor_name = parse_example_op.outputs[ + dense_values_start + i].name + + # Sparse features. 
+ for i in range(num_sparse): + key = fetched[sparse_keys_start + i] + feature_config = config.feature_map[key] + var_len_feature = feature_config.var_len_feature + var_len_feature.dtype = sparse_types[i].as_datatype_enum + var_len_feature.indices_output_tensor_name = parse_example_op.outputs[ + sparse_indices_start + i].name + var_len_feature.values_output_tensor_name = parse_example_op.outputs[ + sparse_values_start + i].name + var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[ + sparse_shapes_start + i].name + + return config + + +def _extract_from_parse_example_v2(parse_example_op, sess): + """Extract ExampleParserConfig from ParseExampleV2 op.""" + config = example_parser_configuration_pb2.ExampleParserConfiguration() + + dense_types = parse_example_op.get_attr("Tdense") + num_sparse = parse_example_op.get_attr("num_sparse") + sparse_types = parse_example_op.get_attr("sparse_types") + ragged_value_types = parse_example_op.get_attr("ragged_value_types") + ragged_split_types = parse_example_op.get_attr("ragged_split_types") + dense_shapes = parse_example_op.get_attr("dense_shapes") + + num_dense = len(dense_types) + num_ragged = len(ragged_value_types) + assert len(ragged_value_types) == len(ragged_split_types) + assert len(parse_example_op.inputs) == 5 + num_dense + + # Skip over the serialized input, and the names input. + fetched = sess.run(parse_example_op.inputs[2:]) + sparse_keys = fetched[0].tolist() + dense_keys = fetched[1].tolist() + ragged_keys = fetched[2].tolist() + dense_defaults = fetched[3:] + assert len(sparse_keys) == num_sparse + assert len(dense_keys) == num_dense + assert len(ragged_keys) == num_ragged + + # Output tensor indices. 
+ sparse_indices_start = 0 + sparse_values_start = num_sparse + sparse_shapes_start = sparse_values_start + num_sparse + dense_values_start = sparse_shapes_start + num_sparse + ragged_values_start = dense_values_start + num_dense + ragged_row_splits_start = ragged_values_start + num_ragged + + # Dense features. + for i in range(num_dense): + key = dense_keys[i] + feature_config = config.feature_map[key] + # Convert the default value numpy array fetched from the session run + # into a TensorProto. + fixed_config = feature_config.fixed_len_feature + + fixed_config.default_value.CopyFrom( + tensor_util.make_tensor_proto(dense_defaults[i])) + # Convert the shape from the attributes + # into a TensorShapeProto. + fixed_config.shape.CopyFrom( + tensor_shape.TensorShape(dense_shapes[i]).as_proto()) + + fixed_config.dtype = dense_types[i].as_datatype_enum + # Get the output tensor name. + fixed_config.values_output_tensor_name = parse_example_op.outputs[ + dense_values_start + i].name + + # Sparse features. 
+ for i in range(num_sparse): + key = sparse_keys[i] + feature_config = config.feature_map[key] + var_len_feature = feature_config.var_len_feature + var_len_feature.dtype = sparse_types[i].as_datatype_enum + var_len_feature.indices_output_tensor_name = parse_example_op.outputs[ + sparse_indices_start + i].name + var_len_feature.values_output_tensor_name = parse_example_op.outputs[ + sparse_values_start + i].name + var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[ + sparse_shapes_start + i].name + + if num_ragged != 0: + del ragged_values_start # unused + del ragged_row_splits_start # unused + raise ValueError("Ragged features are not yet supported by " + "example_parser_configuration.proto") + + return config diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a04c7f7468b5695b2140450c76ca2e02095c94d1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def get_fast_module_type_class() -> object: ... 
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.so new file mode 100644 index 0000000000000000000000000000000000000000..e7cf381bf2fd9fb447c3bd9a822f5fbf65cae545 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..743a81343240c1085e0904348c02c3025f81f4cd --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py @@ -0,0 +1,132 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility to retrieve function args.""" + +import functools + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect + + +def _is_bound_method(fn): + _, fn = tf_decorator.unwrap(fn) + return tf_inspect.ismethod(fn) and (fn.__self__ is not None) + + +def _is_callable_object(obj): + return hasattr(obj, '__call__') and tf_inspect.ismethod(obj.__call__) + + +def fn_args(fn): + """Get argument names for function-like object. + + Args: + fn: Function, or function-like object (e.g., result of `functools.partial`). + + Returns: + `tuple` of string argument names. + + Raises: + ValueError: if partial function has positionally bound arguments + """ + if isinstance(fn, functools.partial): + args = fn_args(fn.func) + args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])] + else: + if _is_callable_object(fn): + fn = fn.__call__ + args = tf_inspect.getfullargspec(fn).args + if _is_bound_method(fn) and args: + # If it's a bound method, it may or may not have a self/cls first + # argument; for example, self could be captured in *args. + # If it does have a positional argument, it is self/cls. + args.pop(0) + return tuple(args) + + +def has_kwargs(fn): + """Returns whether the passed callable has **kwargs in its signature. + + Args: + fn: Function, or function-like object (e.g., result of `functools.partial`). + + Returns: + `bool`: if `fn` has **kwargs in its signature. + + Raises: + `TypeError`: If fn is not a Function, or function-like object. + """ + if isinstance(fn, functools.partial): + fn = fn.func + elif _is_callable_object(fn): + fn = fn.__call__ + elif not callable(fn): + raise TypeError( + 'Argument `fn` should be a callable. 
' + f'Received: fn={fn} (of type {type(fn)})') + return tf_inspect.getfullargspec(fn).varkw is not None + + +def get_func_name(func): + """Returns name of passed callable.""" + _, func = tf_decorator.unwrap(func) + if callable(func): + if tf_inspect.isfunction(func): + return func.__name__ + elif tf_inspect.ismethod(func): + return '%s.%s' % ( + func.__self__.__class__.__name__, + func.__func__.__name__, + ) + else: # Probably a class instance with __call__ + return str(type(func)) + else: + raise ValueError( + 'Argument `func` must be a callable. ' + f'Received func={func} (of type {type(func)})') + + +def get_func_code(func): + """Returns func_code of passed callable, or None if not available.""" + _, func = tf_decorator.unwrap(func) + if callable(func): + if tf_inspect.isfunction(func) or tf_inspect.ismethod(func): + return func.__code__ + # Since the object is not a function or method, but is a callable, we will + # try to access the __call__method as a function. This works with callable + # classes but fails with functool.partial objects despite their __call__ + # attribute. + try: + return func.__call__.__code__ + except AttributeError: + return None + else: + raise ValueError( + 'Argument `func` must be a callable. 
' + f'Received func={func} (of type {type(func)})') + + +_rewriter_config_optimizer_disabled = None + + +def get_disabled_rewriter_config(): + global _rewriter_config_optimizer_disabled + if _rewriter_config_optimizer_disabled is None: + config = config_pb2.ConfigProto() + rewriter_config = config.graph_options.rewrite_options + rewriter_config.disable_meta_optimizer = True + _rewriter_config_optimizer_disabled = config.SerializeToString() + return _rewriter_config_optimizer_disabled diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/is_in_graph_mode.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/is_in_graph_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..7fd8ae9e023325cb701522943958b41e9443c466 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/is_in_graph_mode.py @@ -0,0 +1,18 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A function that tells you if the program is running in graph mode.""" +# Call IS_IN_GRAPH_MODE() when you want to know whether the thread is in +# graph mode. By default, we always are. 
+IS_IN_GRAPH_MODE = lambda: True diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py new file mode 100644 index 0000000000000000000000000000000000000000..8ab710f4b867b48c11245e8764af7ecec7f1b342 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py @@ -0,0 +1,80 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Interface that provides access to Keras dependencies. + +This library is a common interface that contains Keras functions needed by +TensorFlow and TensorFlow Lite and is required as per the dependency inversion +principle (https://en.wikipedia.org/wiki/Dependency_inversion_principle). As per +this principle, high-level modules (eg: TensorFlow and TensorFlow Lite) should +not depend on low-level modules (eg: Keras) and instead both should depend on a +common interface such as this file. 
+""" + + +from tensorflow.python.util.tf_export import tf_export + +_KERAS_CALL_CONTEXT_FUNCTION = None +_KERAS_CLEAR_SESSION_FUNCTION = None +_KERAS_GET_SESSION_FUNCTION = None +_KERAS_LOAD_MODEL_FUNCTION = None + +# TODO(b/169898786): Use the Keras public API when TFLite moves out of TF + + +# Register functions +@tf_export('__internal__.register_call_context_function', v1=[]) +def register_call_context_function(func): + global _KERAS_CALL_CONTEXT_FUNCTION + _KERAS_CALL_CONTEXT_FUNCTION = func + + +@tf_export('__internal__.register_clear_session_function', v1=[]) +def register_clear_session_function(func): + global _KERAS_CLEAR_SESSION_FUNCTION + _KERAS_CLEAR_SESSION_FUNCTION = func + + +@tf_export('__internal__.register_get_session_function', v1=[]) +def register_get_session_function(func): + global _KERAS_GET_SESSION_FUNCTION + _KERAS_GET_SESSION_FUNCTION = func + + +@tf_export('__internal__.register_load_model_function', v1=[]) +def register_load_model_function(func): + global _KERAS_LOAD_MODEL_FUNCTION + _KERAS_LOAD_MODEL_FUNCTION = func + + +# Get functions +def get_call_context_function(): + global _KERAS_CALL_CONTEXT_FUNCTION + return _KERAS_CALL_CONTEXT_FUNCTION + + +def get_clear_session_function(): + global _KERAS_CLEAR_SESSION_FUNCTION + return _KERAS_CLEAR_SESSION_FUNCTION + + +def get_get_session_function(): + global _KERAS_GET_SESSION_FUNCTION + return _KERAS_GET_SESSION_FUNCTION + + +def get_load_model_function(): + global _KERAS_LOAD_MODEL_FUNCTION + return _KERAS_LOAD_MODEL_FUNCTION diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keyword_args.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keyword_args.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd96b91f9f6699f4e6c49cdcb5dba25f4ff04ad --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/keyword_args.py @@ -0,0 +1,50 @@ +# Copyright 
2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Keyword args functions.""" + +import functools + +from tensorflow.python.util import decorator_utils + + +def keyword_args_only(func): + """Decorator for marking specific function accepting keyword args only. + + This decorator raises a `ValueError` if the input `func` is called with any + non-keyword args. This prevents the caller from providing the arguments in + wrong order. + + Args: + func: The function or method needed to be decorated. + + Returns: + Decorated function or method. + + Raises: + ValueError: If `func` is not callable. + """ + + decorator_utils.validate_callable(func, "keyword_args_only") + @functools.wraps(func) + def new_func(*args, **kwargs): + """Keyword args only wrapper.""" + if args: + raise ValueError( + f"The function {func.__name__} only accepts keyword arguments. " + "Do not pass positional arguments. 
Received the following positional " + f"arguments: {args}") + return func(**kwargs) + return new_func diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lazy_loader.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lazy_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8c186677583fc573b45aa0a674330e41cb5237 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lazy_loader.py @@ -0,0 +1,224 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""A LazyLoader class.""" + +import importlib +import os +import types +from tensorflow.python.platform import tf_logging as logging + +_TENSORFLOW_LAZY_LOADER_PREFIX = "_tfll" + + +class LazyLoader(types.ModuleType): + """Lazily import a module, mainly to avoid pulling in large dependencies. + + `contrib`, and `ffmpeg` are examples of modules that are large and not always + needed, and this allows them to only be loaded when they are used. + """ + + # The lint error here is incorrect. 
+ def __init__(self, local_name, parent_module_globals, name, warning=None): + self._tfll_local_name = local_name + self._tfll_parent_module_globals = parent_module_globals + self._tfll_warning = warning + + # These members allows doctest correctly process this module member without + # triggering self._load(). self._load() mutates parant_module_globals and + # triggers a dict mutated during iteration error from doctest.py. + # - for from_module() + super().__setattr__("__module__", name.rsplit(".", 1)[0]) + # - for is_routine() + super().__setattr__("__wrapped__", None) + + super().__init__(name) + + def _load(self): + """Load the module and insert it into the parent's globals.""" + # Import the target module and insert it into the parent's namespace + module = importlib.import_module(self.__name__) + self._tfll_parent_module_globals[self._tfll_local_name] = module + + # Emit a warning if one was specified + if self._tfll_warning: + logging.warning(self._tfll_warning) + # Make sure to only warn once. + self._tfll_warning = None + + # Update this object's dict so that if someone keeps a reference to the + # LazyLoader, lookups are efficient (__getattr__ is only called on lookups + # that fail). 
+ self.__dict__.update(module.__dict__) + + return module + + def __getattr__(self, name): + module = self._load() + return getattr(module, name) + + def __setattr__(self, name, value): + if name.startswith(_TENSORFLOW_LAZY_LOADER_PREFIX): + super().__setattr__(name, value) + else: + module = self._load() + setattr(module, name, value) + self.__dict__[name] = value + try: + # check if the module has __all__ + if name not in self.__all__ and name != "__all__": + self.__all__.append(name) + except AttributeError: + pass + + def __delattr__(self, name): + if name.startswith(_TENSORFLOW_LAZY_LOADER_PREFIX): + super().__delattr__(name) + else: + module = self._load() + delattr(module, name) + self.__dict__.pop(name) + try: + # check if the module has __all__ + if name in self.__all__: + self.__all__.remove(name) + except AttributeError: + pass + + def __repr__(self): + # Carefully to not trigger _load, since repr may be called in very + # sensitive places. + return f"" + + def __dir__(self): + module = self._load() + return dir(module) + + def __reduce__(self): + return importlib.import_module, (self.__name__,) + + +class KerasLazyLoader(LazyLoader): + """LazyLoader that handles routing to different Keras version.""" + + def __init__( # pylint: disable=super-init-not-called + self, parent_module_globals, mode=None, submodule=None, name="keras"): + self._tfll_parent_module_globals = parent_module_globals + self._tfll_mode = mode + self._tfll_submodule = submodule + self._tfll_name = name + self._tfll_initialized = False + + def _initialize(self): + """Resolve the Keras version to use and initialize the loader.""" + self._tfll_initialized = True + package_name = None + keras_version = None + if os.environ.get("TF_USE_LEGACY_KERAS", None) in ("true", "True", "1"): + try: + import tf_keras # pylint: disable=g-import-not-at-top,unused-import + + keras_version = "tf_keras" + if self._tfll_mode == "v1": + package_name = "tf_keras.api._v1.keras" + else: + package_name = 
"tf_keras.api._v2.keras" + except ImportError: + logging.warning( + "Your environment has TF_USE_LEGACY_KERAS set to True, but you " + "do not have the tf_keras package installed. You must install it " + "in order to use the legacy tf.keras. Install it via: " + "`pip install tf_keras`" + ) + else: + try: + import keras # pylint: disable=g-import-not-at-top + + if keras.__version__.startswith("3."): + # This is the Keras 3.x case. + keras_version = "keras_3" + package_name = "keras._tf_keras.keras" + else: + # This is the Keras 2.x case. + keras_version = "keras_2" + if self._tfll_mode == "v1": + package_name = "keras.api._v1.keras" + else: + package_name = "keras.api._v2.keras" + except ImportError: + raise ImportError( # pylint: disable=raise-missing-from + "Keras cannot be imported. Check that it is installed." + ) + + self._tfll_keras_version = keras_version + if keras_version is not None: + if self._tfll_submodule is not None: + package_name += "." + self._tfll_submodule + super().__init__( + self._tfll_name, self._tfll_parent_module_globals, package_name + ) + else: + raise ImportError( # pylint: disable=raise-missing-from + "Keras cannot be imported. Check that it is installed." + ) + + def __getattr__(self, item): + if item in ("_tfll_mode", "_tfll_initialized", "_tfll_name"): + return super(types.ModuleType, self).__getattribute__(item) + if not self._tfll_initialized: + self._initialize() + if self._tfll_keras_version == "keras_3": + if ( + self._tfll_mode == "v1" + and not self._tfll_submodule + and item.startswith("compat.v1.") + ): + raise AttributeError( + "`tf.compat.v1.keras` is not available with Keras 3. Keras 3 has " + "no support for TF 1 APIs. You can install the `tf_keras` package " + "as an alternative, and set the environment variable " + "`TF_USE_LEGACY_KERAS=True` to configure TensorFlow to route " + "`tf.compat.v1.keras` to `tf_keras`." 
+ ) + elif ( + self._tfll_mode == "v2" + and not self._tfll_submodule + and item.startswith("compat.v2.") + ): + raise AttributeError( + "`tf.compat.v2.keras` is not available with Keras 3. Just use " + "`import keras` instead." + ) + elif self._tfll_submodule and self._tfll_submodule.startswith( + "__internal__.legacy." + ): + raise AttributeError( + f"`{item}` is not available with Keras 3." + ) + module = self._load() + return getattr(module, item) + + def __repr__(self): + if self._tfll_initialized: + return ( + f"" + ) + return "" + + def __dir__(self): + if not self._tfll_initialized: + self._initialize() + return super().__dir__() diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lock_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lock_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6832011e1550931432293cb9c7274ff0be9f1646 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/lock_util.py @@ -0,0 +1,130 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Locking related utils.""" + +import threading + + +class GroupLock(object): + """A lock to allow many members of a group to access a resource exclusively. 
+ + This lock provides a way to allow access to a resource by multiple threads + belonging to a logical group at the same time, while restricting access to + threads from all other groups. You can think of this as an extension of a + reader-writer lock, where you allow multiple writers at the same time. We + made it generic to support multiple groups instead of just two - readers and + writers. + + Simple usage example with two groups accessing the same resource: + + ```python + lock = GroupLock(num_groups=2) + + # In a member of group 0: + with lock.group(0): + # do stuff, access the resource + # ... + + # In a member of group 1: + with lock.group(1): + # do stuff, access the resource + # ... + ``` + + Using as a context manager with `.group(group_id)` is the easiest way. You + can also use the `acquire` and `release` method directly. + """ + + __slots__ = ["_ready", "_num_groups", "_group_member_counts"] + + def __init__(self, num_groups=2): + """Initialize a group lock. + + Args: + num_groups: The number of groups that will be accessing the resource under + consideration. Should be a positive number. + + Returns: + A group lock that can then be used to synchronize code. + + Raises: + ValueError: If num_groups is less than 1. + """ + if num_groups < 1: + raise ValueError( + "Argument `num_groups` must be a positive integer. " + f"Received: num_groups={num_groups}") + self._ready = threading.Condition(threading.Lock()) + self._num_groups = num_groups + self._group_member_counts = [0] * self._num_groups + + def group(self, group_id): + """Enter a context where the lock is with group `group_id`. + + Args: + group_id: The group for which to acquire and release the lock. + + Returns: + A context manager which will acquire the lock for `group_id`. 
+ """ + self._validate_group_id(group_id) + return self._Context(self, group_id) + + def acquire(self, group_id): + """Acquire the group lock for a specific group `group_id`.""" + self._validate_group_id(group_id) + + self._ready.acquire() + while self._another_group_active(group_id): + self._ready.wait() + self._group_member_counts[group_id] += 1 + self._ready.release() + + def release(self, group_id): + """Release the group lock for a specific group `group_id`.""" + self._validate_group_id(group_id) + + self._ready.acquire() + self._group_member_counts[group_id] -= 1 + if self._group_member_counts[group_id] == 0: + self._ready.notify_all() + self._ready.release() + + def _another_group_active(self, group_id): + return any( + c > 0 for g, c in enumerate(self._group_member_counts) if g != group_id) + + def _validate_group_id(self, group_id): + if group_id < 0 or group_id >= self._num_groups: + raise ValueError( + "Argument `group_id` should verify `0 <= group_id < num_groups` " + f"(with `num_groups={self._num_groups}`). " + f"Received: group_id={group_id}") + + class _Context(object): + """Context manager helper for `GroupLock`.""" + + __slots__ = ["_lock", "_group_id"] + + def __init__(self, lock, group_id): + self._lock = lock + self._group_id = group_id + + def __enter__(self): + self._lock.acquire(self._group_id) + + def __exit__(self, type_arg, value_arg, traceback_arg): + del type_arg, value_arg, traceback_arg + self._lock.release(self._group_id) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/module_wrapper.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/module_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..0611cc43c79e8ffe47d7e4e38b4e3db888b7a445 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/module_wrapper.py @@ -0,0 +1,283 @@ +# Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides wrapper for TensorFlow modules.""" + +import importlib + +from tensorflow.python.eager import monitoring +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import fast_module_type +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect +from tensorflow.tools.compatibility import all_renames_v2 + +FastModuleType = fast_module_type.get_fast_module_type_class() +_PER_MODULE_WARNING_LIMIT = 1 +compat_v1_usage_gauge = monitoring.BoolGauge('/tensorflow/api/compat/v1', + 'compat.v1 usage') + + +def get_rename_v2(name): + if name not in all_renames_v2.symbol_renames: + return None + return all_renames_v2.symbol_renames[name] + + +def _call_location(): + """Extracts the caller filename and line number as a string. + + Returns: + A string describing the caller source location. + """ + frame = tf_inspect.currentframe() + assert frame.f_back.f_code.co_name == '_tfmw_add_deprecation_warning', ( + 'This function should be called directly from ' + '_tfmw_add_deprecation_warning, as the caller is identified ' + 'heuristically by chopping off the top stack frames.') + + # We want to get stack frame 3 frames up from current frame, + # i.e. above __getattr__, _tfmw_add_deprecation_warning, + # and _call_location calls. 
+ for _ in range(3): + parent = frame.f_back + if parent is None: + break + frame = parent + return '{}:{}'.format(frame.f_code.co_filename, frame.f_lineno) + + +def contains_deprecation_decorator(decorators): + return any(d.decorator_name == 'deprecated' for d in decorators) + + +def has_deprecation_decorator(symbol): + """Checks if given object has a deprecation decorator. + + We check if deprecation decorator is in decorators as well as + whether symbol is a class whose __init__ method has a deprecation + decorator. + Args: + symbol: Python object. + + Returns: + True if symbol has deprecation decorator. + """ + decorators, symbol = tf_decorator.unwrap(symbol) + if contains_deprecation_decorator(decorators): + return True + if tf_inspect.isfunction(symbol): + return False + if not tf_inspect.isclass(symbol): + return False + if not hasattr(symbol, '__init__'): + return False + init_decorators, _ = tf_decorator.unwrap(symbol.__init__) + return contains_deprecation_decorator(init_decorators) + + +class TFModuleWrapper(FastModuleType): + """Wrapper for TF modules to support deprecation messages and lazyloading.""" + # Ensures that compat.v1 API usage is recorded at most once + compat_v1_usage_recorded = False + + def __init__( + self, + wrapped, + module_name, + public_apis=None, + deprecation=True, + has_lite=False): + super(TFModuleWrapper, self).__init__(wrapped.__name__) + FastModuleType.set_getattr_callback(self, TFModuleWrapper._getattr) + FastModuleType.set_getattribute_callback(self, + TFModuleWrapper._getattribute) + self.__dict__.update(wrapped.__dict__) + # Prefix all local attributes with _tfmw_ so that we can + # handle them differently in attribute access methods. 
+ self._tfmw_wrapped_module = wrapped + self._tfmw_module_name = module_name + self._tfmw_public_apis = public_apis + self._tfmw_print_deprecation_warnings = deprecation + self._tfmw_has_lite = has_lite + self._tfmw_is_compat_v1 = (wrapped.__name__.endswith('.compat.v1')) + # Set __all__ so that import * work for lazy loaded modules + if self._tfmw_public_apis: + self._tfmw_wrapped_module.__all__ = list(self._tfmw_public_apis.keys()) + self.__all__ = list(self._tfmw_public_apis.keys()) + else: + if hasattr(self._tfmw_wrapped_module, '__all__'): + self.__all__ = self._tfmw_wrapped_module.__all__ + else: + self._tfmw_wrapped_module.__all__ = [ + attr for attr in dir(self._tfmw_wrapped_module) + if not attr.startswith('_') + ] + self.__all__ = self._tfmw_wrapped_module.__all__ + + # names we already checked for deprecation + self._tfmw_deprecated_checked = set() + self._tfmw_warning_count = 0 + + def _tfmw_add_deprecation_warning(self, name, attr): + """Print deprecation warning for attr with given name if necessary.""" + if (self._tfmw_warning_count < _PER_MODULE_WARNING_LIMIT and + name not in self._tfmw_deprecated_checked): + + self._tfmw_deprecated_checked.add(name) + + if self._tfmw_module_name: + full_name = 'tf.%s.%s' % (self._tfmw_module_name, name) + else: + full_name = 'tf.%s' % name + rename = get_rename_v2(full_name) + if rename and not has_deprecation_decorator(attr): + call_location = _call_location() + # skip locations in Python source + if not call_location.startswith('<'): + logging.warning( + 'From %s: The name %s is deprecated. Please use %s instead.\n', + _call_location(), full_name, rename) + self._tfmw_warning_count += 1 + return True + return False + + def _tfmw_import_module(self, name): + """Lazily loading the modules.""" + # We ignore 'app' because it is accessed in __init__.py of tf.compat.v1. + # That way, if a user only imports tensorflow.compat.v1, it is not + # considered v1 API usage. 
+ if (self._tfmw_is_compat_v1 and name != 'app' and + not TFModuleWrapper.compat_v1_usage_recorded): + TFModuleWrapper.compat_v1_usage_recorded = True + compat_v1_usage_gauge.get_cell().set(True) + + symbol_loc_info = self._tfmw_public_apis[name] + if symbol_loc_info[0]: + module = importlib.import_module(symbol_loc_info[0]) + attr = getattr(module, symbol_loc_info[1]) + else: + attr = importlib.import_module(symbol_loc_info[1]) + setattr(self._tfmw_wrapped_module, name, attr) + self.__dict__[name] = attr + # Cache the pair + self._fastdict_insert(name, attr) + return attr + + def _getattribute(self, name): + # pylint: disable=g-doc-return-or-yield,g-doc-args + """Imports and caches pre-defined API. + + Warns if necessary. + + This method is a replacement for __getattribute__(). It will be added into + the extended python module as a callback to reduce API overhead. + """ + # Avoid infinite recursions + func__fastdict_insert = object.__getattribute__(self, '_fastdict_insert') + + # Make sure we do not import from tensorflow/lite/__init__.py + if name == 'lite': + if self._tfmw_has_lite: + attr = self._tfmw_import_module(name) + setattr(self._tfmw_wrapped_module, 'lite', attr) + func__fastdict_insert(name, attr) + return attr + # Placeholder for Google-internal contrib error + + attr = object.__getattribute__(self, name) + + # Return and cache dunders and our own members. + # This is necessary to guarantee successful construction. + # In addition, all the accessed attributes used during the construction must + # begin with "__" or "_tfmw" or "_fastdict_". + if name.startswith('__') or name.startswith('_tfmw_') or name.startswith( + '_fastdict_'): + func__fastdict_insert(name, attr) + return attr + + # Print deprecations, only cache functions after deprecation warnings have + # stopped. 
+ if not (self._tfmw_print_deprecation_warnings and + self._tfmw_add_deprecation_warning(name, attr)): + func__fastdict_insert(name, attr) + + return attr + + def _getattr(self, name): + # pylint: disable=g-doc-return-or-yield,g-doc-args + """Imports and caches pre-defined API. + + Warns if necessary. + + This method is a replacement for __getattr__(). It will be added into the + extended python module as a callback to reduce API overhead. Instead of + relying on implicit AttributeError handling, this added callback function + will + be called explicitly from the extended C API if the default attribute lookup + fails. + """ + try: + attr = getattr(self._tfmw_wrapped_module, name) + except AttributeError: + # Placeholder for Google-internal contrib error + + if not self._tfmw_public_apis: + raise + if name not in self._tfmw_public_apis: + raise + attr = self._tfmw_import_module(name) + + if self._tfmw_print_deprecation_warnings: + self._tfmw_add_deprecation_warning(name, attr) + return attr + + def __setattr__(self, arg, val): + if not arg.startswith('_tfmw_'): + setattr(self._tfmw_wrapped_module, arg, val) + self.__dict__[arg] = val + if arg not in self.__all__ and arg != '__all__': + self.__all__.append(arg) + # Update the cache + if self._fastdict_key_in(arg): + self._fastdict_insert(arg, val) + super(TFModuleWrapper, self).__setattr__(arg, val) + + def __dir__(self): + if self._tfmw_public_apis: + return list( + set(self._tfmw_public_apis.keys()).union( + set([ + attr for attr in dir(self._tfmw_wrapped_module) + if not attr.startswith('_') + ]))) + else: + return dir(self._tfmw_wrapped_module) + + def __delattr__(self, name): + if name.startswith('_tfmw_'): + super(TFModuleWrapper, self).__delattr__(name) + else: + delattr(self._tfmw_wrapped_module, name) + self.__dict__.pop(name) + if name in self.__all__: + self.__all__.remove(name) + self._fastdict_pop(name) + # delattr(self._tfmw_wrapped_module, name) + + def __repr__(self): + return 
self._tfmw_wrapped_module.__repr__() + + def __reduce__(self): + return importlib.import_module, (self.__name__,) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest.py new file mode 100644 index 0000000000000000000000000000000000000000..748fc3b167f5c816f5f0ae5bc99d1ced8da25787 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest.py @@ -0,0 +1,1314 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions that work with structures. + +A structure is either: + +* one of the recognized Python collections, holding _nested structures_; +* a value of any other type, typically a TensorFlow data type like Tensor, + Variable, or of compatible types such as int, float, ndarray, etc. these are + commonly referred to as _atoms_ of the structure. + +A structure of type `T` is a structure whose atomic items are of type `T`. +For example, a structure of `tf.Tensor` only contains `tf.Tensor` as its atoms. + +Historically a _nested structure_ was called a _nested sequence_ in TensorFlow. +A nested structure is sometimes called a _nest_ or a _tree_, but the formal +name _nested structure_ is preferred. 
+ +Refer to [Nesting Data Structures] +(https://en.wikipedia.org/wiki/Nesting_(computing)#Data_structures). + +The following collection types are recognized by `tf.nest` as nested +structures: + +* `collections.abc.Sequence` (except `string` and `bytes`). + This includes `list`, `tuple`, and `namedtuple`. +* `collections.abc.Mapping` (with sortable keys). + This includes `dict` and `collections.OrderedDict`. +* `collections.abc.MappingView` (with sortable keys). +* [`attr.s` classes](https://www.attrs.org/). +* Classes (including + [`dataclass`](https://docs.python.org/library/dataclasses.html)) + that implement the `__tf_flatten__` and `__tf_unflatten__` methods. + See examples in + [`nest_util.py`](https://github.com/tensorflow/tensorflow/blob/04869b4e63bfc03cb13627b3e1b879fdd0f69e34/tensorflow/python/util/nest_util.py#L97) + +Any other values are considered **atoms**. Not all collection types are +considered nested structures. For example, the following types are +considered atoms: + +* `set`; `{"a", "b"}` is an atom, while `["a", "b"]` is a nested structure. +* [`dataclass` classes](https://docs.python.org/library/dataclasses.html) that +don't implement the custom flattening/unflattening methods mentioned above. +* `tf.Tensor`. +* `numpy.array`. + +`tf.nest.is_nested` checks whether an object is a nested structure or an atom. +For example: + + >>> tf.nest.is_nested("1234") + False + >>> tf.nest.is_nested([1, 3, [4, 5]]) + True + >>> tf.nest.is_nested(((7, 8), (5, 6))) + True + >>> tf.nest.is_nested([]) + True + >>> tf.nest.is_nested({"a": 1, "b": 2}) + True + >>> tf.nest.is_nested({"a": 1, "b": 2}.keys()) + True + >>> tf.nest.is_nested({"a": 1, "b": 2}.values()) + True + >>> tf.nest.is_nested({"a": 1, "b": 2}.items()) + True + >>> tf.nest.is_nested(set([1, 2])) + False + >>> ones = tf.ones([2, 3]) + >>> tf.nest.is_nested(ones) + False + +Note: A proper structure shall form a tree. 
The user shall ensure there is no +cyclic references within the items in the structure, +i.e., no references in the structure of the input of these functions +should be recursive. The behavior is undefined if there is a cycle. + +API docstring: tensorflow.nest +""" + +from tensorflow.python.util import _pywrap_nest +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util import nest_util +from tensorflow.python.util.tf_export import tf_export + + +STRUCTURES_HAVE_MISMATCHING_LENGTHS = ( + nest_util.STRUCTURES_HAVE_MISMATCHING_LENGTHS +) + +STRUCTURES_HAVE_MISMATCHING_TYPES = nest_util.STRUCTURES_HAVE_MISMATCHING_TYPES + +SHALLOW_TREE_HAS_INVALID_KEYS = nest_util.SHALLOW_TREE_HAS_INVALID_KEYS + +INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = ( + nest_util.INPUT_TREE_SMALLER_THAN_SHALLOW_TREE +) + +IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = ( + "If shallow structure is a sequence, input must also be a sequence. " + "Input has type: {}." +) + +is_namedtuple = nest_util.is_namedtuple +_is_namedtuple = nest_util.is_namedtuple +_is_attrs = _pywrap_utils.IsAttrs +_is_mapping = _pywrap_utils.IsMapping +same_namedtuples = nest_util.same_namedtuples + + +def _yield_value(iterable): + return nest_util.yield_value(nest_util.Modality.CORE, iterable) + + +def _yield_sorted_items(iterable): + return nest_util.yield_sorted_items(nest_util.Modality.CORE, iterable) + + +@tf_export("__internal__.nest.is_mapping", v1=[]) +def is_mapping(obj): + """Returns a true if its input is a collections.Mapping.""" + return _is_mapping(obj) + + +# TODO(b/225045380): Move to a "leaf" library to use in trace_type. +@tf_export("__internal__.nest.is_attrs", v1=[]) +def is_attrs(obj): + """Returns a true if its input is an instance of an attr.s decorated class.""" + return _is_attrs(obj) + + +@tf_export("__internal__.nest.sequence_like", v1=[]) +def _sequence_like(instance, args): + """Converts the sequence `args` to the same type as `instance`. 
+ + Args: + instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, + `collections.OrderedDict`, or `composite_tensor.Composite_Tensor` + or `type_spec.TypeSpec`. + args: items to be converted to the `instance` type. + + Returns: + `args` with the type of `instance`. + """ + return nest_util.sequence_like(instance, args) + + +_is_nested_or_composite = _pywrap_utils.IsNestedOrComposite + + +@tf_export("nest.is_nested") +def is_nested(seq): + """Returns true if its input is a nested structure. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a nested structure. + + Args: + seq: the value to test. + + Returns: + True if the input is a nested structure. + """ + return nest_util.is_nested(nest_util.Modality.CORE, seq) + + +def is_nested_or_composite(seq): + """Returns true if its input is a nested structure or a composite. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a nested structure. + + Args: + seq: the value to test. + + Returns: + True if the input is a nested structure or a composite. + """ + return _is_nested_or_composite(seq) + + +def is_sequence_or_composite(seq): + return _is_nested_or_composite(seq) + + +@tf_export("nest.flatten") +def flatten(structure, expand_composites=False): + """Returns a flat list from a given structure. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If the structure is an atom, then returns a single-item list: [structure]. + + This is the inverse of the `nest.pack_sequence_as` method that takes in a + flattened list and re-packs it into the nested structure. + + In the case of dict instances, the sequence consists of the values, sorted by + key to ensure deterministic behavior. This is true also for OrderedDict + instances: their sequence order is ignored, the sorting order of keys is used + instead. 
The same convention is followed in `nest.pack_sequence_as`. This + correctly repacks dicts and OrderedDicts after they have been flattened, and + also allows flattening an OrderedDict and then repacking it back using a + corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys + cannot be flattened. + + Users must not modify any collections used in nest while this function is + running. + + Examples: + + 1. Python dict (ordered by key): + + >>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" } + >>> tf.nest.flatten(dict) + ['value1', 'value2', 'value3'] + + 2. For a nested python tuple: + + >>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + >>> tf.nest.flatten(tuple) + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + + 3. For a nested dictionary of dictionaries: + + >>> dict = { "key3": {"c": (1.0, 2.0), "a": (3.0)}, + ... "key1": {"m": "val1", "g": "val2"} } + >>> tf.nest.flatten(dict) + ['val2', 'val1', 3.0, 1.0, 2.0] + + 4. Numpy array (will not flatten): + + >>> array = np.array([[1, 2], [3, 4]]) + >>> tf.nest.flatten(array) + [array([[1, 2], + [3, 4]])] + + 5. `tf.Tensor` (will not flatten): + + >>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) + >>> tf.nest.flatten(tensor) + [] + + 6. `tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + with `expand_composites=False`, we just return the RaggedTensor as is. 
+ + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=False) + [] + + with `expand_composites=True`, we return the component Tensors that make up + the RaggedTensor representation (the values and row_splits tensors) + + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=True) + [, + ] + + Args: + structure: an atom or a nested structure. Note, numpy arrays are considered + atoms and are not flattened. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A Python list, the flattened version of the input. + + Raises: + TypeError: The nest is or contains a dict with non-sortable keys. + """ + return nest_util.flatten( + nest_util.Modality.CORE, structure, expand_composites + ) + + +@tf_export("nest.assert_same_structure") +def assert_same_structure(nest1, nest2, check_types=True, + expand_composites=False): + """Asserts that two structures are nested in the same way. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + Note the method does not check the types of atoms inside the structures. + + Examples: + + * These atom vs. atom comparisons will pass: + + >>> tf.nest.assert_same_structure(1.5, tf.Variable(1, tf.uint32)) + >>> tf.nest.assert_same_structure("abc", np.array([1, 2])) + + * These nested structure vs. nested structure comparisons will pass: + + >>> structure1 = (((1, 2), 3), 4, (5, 6)) + >>> structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) + >>> structure3 = [(("a", "b"), "c"), "d", ["e", "f"]] + >>> tf.nest.assert_same_structure(structure1, structure2) + >>> tf.nest.assert_same_structure(structure1, structure3, check_types=False) + + >>> import collections + >>> tf.nest.assert_same_structure( + ... 
collections.namedtuple("bar", "a b")(1, 2), + ... collections.namedtuple("foo", "a b")(2, 3), + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple("bar", "a b")(1, 2), + ... { "a": 1, "b": 2 }, + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... { "a": 1, "b": 2, "c": 3 }, + ... { "c": 6, "b": 5, "a": 4 }) + + >>> ragged_tensor1 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_splits=[0, 4, 4, 7, 8, 8]) + >>> ragged_tensor2 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4], + ... row_splits=[0, 3]) + >>> tf.nest.assert_same_structure( + ... ragged_tensor1, + ... ragged_tensor2, + ... expand_composites=True) + + * These examples will raise exceptions: + + >>> tf.nest.assert_same_structure([0, 1], np.array([0, 1])) + Traceback (most recent call last): + ... + ValueError: The two structures don't have the same nested structure + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple('bar', 'a b')(1, 2), + ... collections.namedtuple('foo', 'a b')(2, 3)) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + Args: + nest1: an atom or a nested structure. + nest2: an atom or a nested structure. + check_types: if `True` (default) types of structures are checked as well, + including the keys of dictionaries. If set to `False`, for example a list + and a tuple of objects will look the same if they have the same size. Note + that namedtuples with identical name and fields are always considered to + have the same shallow structure. Two types will also be considered the + same if they are both list subtypes (which allows "list" and + "_ListWrapper" from trackable dependency tracking to compare equal). + `check_types=True` only checks type of sub-structures. The types of atoms + are not checked. 
+ expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Raises: + ValueError: If the two structures do not have the same number of atoms or + if the two structures are not nested in the same way. + TypeError: If the two structures differ in the type of sequence in any of + their substructures. Only possible if `check_types` is `True`. + """ + nest_util.assert_same_structure( + nest_util.Modality.CORE, nest1, nest2, check_types, expand_composites + ) + + +def flatten_dict_items(dictionary): + """Returns a dictionary with flattened keys and values. + + This function flattens the keys and values of a dictionary, which can be + arbitrarily nested structures, and returns the flattened version of such + structures: + + ```python + example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))} + result = {4: "a", 5: "b", 6: "c", 8: "d"} + flatten_dict_items(example_dictionary) == result + ``` + + The input dictionary must satisfy two properties: + + 1. Its keys and values should have the same exact nested structure. + 2. The set of all flattened keys of the dictionary must not contain repeated + keys. + + Args: + dictionary: the dictionary to zip + + Returns: + The zipped dictionary. + + Raises: + TypeError: If the input is not a dictionary. + ValueError: If any key and value do not have the same structure layout, or + if keys are not unique. + """ + return _pywrap_nest.FlattenDictItems(dictionary) + + +@tf_export("nest.pack_sequence_as") +def pack_sequence_as(structure, flat_sequence, expand_composites=False): + """Returns a given flattened sequence packed into a given structure. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If `structure` is an atom, `flat_sequence` must be a single-item list; + in this case the return value is `flat_sequence[0]`. 
+ + If `structure` is or contains a dict instance, the keys will be sorted to + pack the flat sequence in deterministic order. This is true also for + `OrderedDict` instances: their sequence order is ignored, the sorting order of + keys is used instead. The same convention is followed in `flatten`. + This correctly repacks dicts and `OrderedDict`s after they have been + flattened, and also allows flattening an `OrderedDict` and then repacking it + back using a corresponding plain dict, or vice-versa. + Dictionaries with non-sortable keys cannot be flattened. + + Examples: + + 1. Python dict: + + >>> structure = { "key3": "", "key1": "", "key2": "" } + >>> flat_sequence = ["value1", "value2", "value3"] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': 'value3', 'key1': 'value1', 'key2': 'value2'} + + 2. For a nested python tuple: + + >>> structure = (('a','b'), ('c','d','e'), 'f') + >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + + 3. For a nested dictionary of dictionaries: + + >>> structure = { "key3": {"c": ('alpha', 'beta'), "a": ('gamma')}, + ... "key1": {"e": "val1", "d": "val2"} } + >>> flat_sequence = ['val2', 'val1', 3.0, 1.0, 2.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}} + + 4. Numpy array (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [np.array([[1, 2], [3, 4]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [array([[1, 2], + [3, 4]])] + + 5. tf.Tensor (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [tf.constant([[1., 2., 3.], [4., 5., 6.]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [] + + 6. 
`tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + With `expand_composites=False`, we treat RaggedTensor as a scalar. + + >>> structure = { "foo": tf.ragged.constant([[1, 2], [3]]), + ... "bar": tf.constant([[5]]) } + >>> flat_sequence = [ "one", "two" ] + >>> tf.nest.pack_sequence_as(structure, flat_sequence, + ... expand_composites=False) + {'foo': 'two', 'bar': 'one'} + + With `expand_composites=True`, we expect that the flattened input contains + the tensors making up the ragged tensor i.e. the values and row_splits + tensors. + + >>> structure = { "foo": tf.ragged.constant([[1., 2.], [3.]]), + ... "bar": tf.constant([[5.]]) } + >>> tensors = tf.nest.flatten(structure, expand_composites=True) + >>> print(tensors) + [, + , + ] + >>> verified_tensors = [tf.debugging.check_numerics(t, 'invalid tensor: ') + ... if t.dtype==tf.float32 else t + ... for t in tensors] + >>> tf.nest.pack_sequence_as(structure, verified_tensors, + ... expand_composites=True) + {'foo': , + 'bar': } + + Args: + structure: Nested structure, whose structure is given by nested lists, + tuples, and dicts. Note: numpy arrays and strings are considered + scalars. + flat_sequence: flat sequence to pack. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If `flat_sequence` and `structure` have different + atom counts. + TypeError: `structure` is or contains a dict with non-sortable keys. 
+ """ + return nest_util.pack_sequence_as( + nest_util.Modality.CORE, structure, flat_sequence, expand_composites + ) + + +@tf_export("nest.map_structure") +def map_structure(func, *structure, **kwargs): + """Creates a new structure by applying `func` to each atom in `structure`. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + Applies `func(x[0], x[1], ...)` where x[i] enumerates all atoms in + `structure[i]`. All items in `structure` must have the same arity, + and the return value will contain results with the same structure layout. + + Examples: + + * A single Python dict: + + >>> a = {"hello": 24, "world": 76} + >>> tf.nest.map_structure(lambda p: p * 2, a) + {'hello': 48, 'world': 152} + + * Multiple Python dictionaries: + + >>> d1 = {"hello": 24, "world": 76} + >>> d2 = {"hello": 36, "world": 14} + >>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2) + {'hello': 60, 'world': 90} + + * A single Python list: + + >>> a = [24, 76, "ab"] + >>> tf.nest.map_structure(lambda p: p * 2, a) + [48, 152, 'abab'] + + * Scalars: + + >>> tf.nest.map_structure(lambda x, y: x + y, 3, 4) + 7 + + * Empty structures: + + >>> tf.nest.map_structure(lambda x: x + 1, ()) + () + + * Check the types of iterables: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + * Type check is set to False: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False) + (((None, None), None), None, (None, None)) + + Args: + func: A callable that accepts as many arguments as there are structures. + *structure: atom or nested structure. 
+ **kwargs: Valid keyword args are: + * `check_types`: If set to `True` (default) the types of iterables within + the structures have to be same (e.g. `map_structure(func, [1], (1,))` + raises a `TypeError` exception). To allow this set this argument to + `False`. Note that namedtuples with identical name and fields are always + considered to have the same shallow structure. + * `expand_composites`: If set to `True`, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. If `False` (the default), then composite tensors are + not expanded. + + Returns: + A new structure with the same arity as `structure[0]`, whose atoms + correspond to `func(x[0], x[1], ...)` where `x[i]` is the atom in the + corresponding location in `structure[i]`. If there are different structure + types and `check_types` is `False` the structure types of the first + structure will be used. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + ValueError: If no structure is provided or if the structures do not match + each other by type. + ValueError: If wrong keyword arguments are provided. + """ + return nest_util.map_structure( + nest_util.Modality.CORE, func, *structure, **kwargs + ) + + +def map_structure_with_paths(func, *structure, **kwargs): + """Applies `func` to each entry in `structure` and returns a new structure. + + Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in + `structure[i]` and `path` is the common path to x[i] in the structures. All + structures in `structure` must have the same arity, and the return value will + contain the results with the same structure layout. Special kwarg + `check_types` determines whether the types of iterables within the structure + must be the same-- see **kwargs definition below. 
+ + Args: + func: A callable with the signature func(path, *values, **kwargs) that is + evaluated on the leaves of the structure. + *structure: A variable number of compatible structures to process. + **kwargs: Optional kwargs to be passed through to func. Special kwarg + `check_types` is not passed to func, but instead determines whether the + types of iterables within the structures have to be same (e.g., + `map_structure(func, [1], (1,))` raises a `TypeError` exception). By + default, the types must match. To allow iteration over structures of + different types (but common arity), set this kwarg to `False`. + + Returns: + A structure of the same form as the input structures whose leaves are the + result of evaluating func on corresponding leaves of the input structures. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + TypeError: If `check_types` is not `False` and the two structures differ in + the type of sequence in any of their substructures. + ValueError: If no structures are provided. + """ + def wrapper_func(tuple_path, *inputs, **kwargs): + string_path = "/".join(str(s) for s in tuple_path) + return func(string_path, *inputs, **kwargs) + + return nest_util.map_structure_up_to( + nest_util.Modality.CORE, structure[0], wrapper_func, *structure, **kwargs + ) + + +def map_structure_with_tuple_paths(func, *structure, **kwargs): + """Applies `func` to each entry in `structure` and returns a new structure. + + Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry + in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary + keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the + common path to x[i] in the structures. All structures in `structure` must have + the same arity, and the return value will contain the results in the same + structure. 
Special kwarg `check_types` determines whether the types of + iterables within the structure must be the same-- see **kwargs definition + below. + + Args: + func: A callable with the signature `func(tuple_path, *values, **kwargs)` + that is evaluated on the leaves of the structure. + *structure: A variable number of compatible structures to process. + **kwargs: Optional kwargs to be passed through to func. Special kwarg + `check_types` is not passed to func, but instead determines whether the + types of iterables within the structures have to be same (e.g. + `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow + this set this argument to `False`. + + Returns: + A structure of the same form as the input structures whose leaves are the + result of evaluating func on corresponding leaves of the input structures. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + TypeError: If `check_types` is not `False` and the two structures differ in + the type of sequence in any of their substructures. + ValueError: If no structures are provided. + """ + return nest_util.map_structure_up_to( + nest_util.Modality.CORE, structure[0], func, *structure, **kwargs + ) + + +def assert_shallow_structure(shallow_tree, + input_tree, + check_types=True, + expand_composites=False): + """Asserts that `shallow_tree` is a shallow structure of `input_tree`. + + That is, this function tests if the `input_tree` structure can be created from + the `shallow_tree` structure by replacing its leaf nodes with deeper + tree structures. 
+ + Examples: + + The following code will raise an exception: + ```python + shallow_tree = {"a": "A", "b": "B"} + input_tree = {"a": 1, "c": 2} + assert_shallow_structure(shallow_tree, input_tree) + ``` + + The following code will raise an exception: + ```python + shallow_tree = ["a", "b"] + input_tree = ["c", ["d", "e"], "f"] + assert_shallow_structure(shallow_tree, input_tree) + ``` + + Args: + shallow_tree: an arbitrarily nested structure. + input_tree: an arbitrarily nested structure. + check_types: if `True` (default) the sequence types of `shallow_tree` and + `input_tree` have to be the same. Note that even with check_types==True, + this function will consider two different namedtuple classes with the same + name and _fields attribute to be the same class. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + Raises: + TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + TypeError: If the sequence types of `shallow_tree` are different from + `input_tree`. Only raised if `check_types` is `True`. + ValueError: If the sequence lengths of `shallow_tree` are different from + `input_tree`. + """ + nest_util.assert_shallow_structure( + nest_util.Modality.CORE, + shallow_tree, + input_tree, + check_types, + expand_composites, + ) + + +@tf_export("__internal__.nest.flatten_up_to", v1=[]) +def flatten_up_to(shallow_tree, input_tree, check_types=True, + expand_composites=False): + """Flattens `input_tree` up to `shallow_tree`. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + Any further depth in structure in `input_tree` is retained as structures in + the partially flatten output. + + If `shallow_tree` and `input_tree` are atoms, this returns a + single-item list: `[input_tree]`. 
+ + Use Case: + + Sometimes we may wish to partially flatten a structure, retaining some + of the nested structure. We achieve this by specifying a shallow structure, + `shallow_tree`, we wish to flatten up to. + + The input, `input_tree`, can be thought of as having the same structure layout + as `shallow_tree`, but with leaf nodes that are themselves tree structures. + + Examples: + + ```python + input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + shallow_tree = [[True, True], [False, True]] + + flattened_input_tree = flatten_up_to(shallow_tree, input_tree) + flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) + + # Output is: + # [[2, 2], [3, 3], [4, 9], [5, 5]] + # [True, True, False, True] + ``` + + ```python + input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] + shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] + + input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) + input_tree_flattened = flatten(input_tree) + + # Output is: + # [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + # ['a', 1, 'b', 2, 'c', 3, 'd', 4] + ``` + + Edge Cases for atoms: + + ```python + flatten_up_to(0, 0) # Output: [0] + flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] + flatten_up_to([0, 1, 2], 0) # Output: TypeError + flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] + ``` + + Args: + shallow_tree: a possibly pruned structure of input_tree. + input_tree: an atom or a nested structure. + Note, numpy arrays are considered atoms. + check_types: bool. If True, check that each node in shallow_tree has the + same type as the corresponding node in input_tree. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A Python list, the partially flattened version of `input_tree` according to + the structure of `shallow_tree`. 
+ + Raises: + TypeError: If `shallow_tree` is a nested structure but `input_tree` is not. + TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. + """ + return nest_util.flatten_up_to( + nest_util.Modality.CORE, + shallow_tree, + input_tree, + check_types, + expand_composites, + ) + + +def flatten_with_tuple_paths_up_to(shallow_tree, + input_tree, + check_types=True, + expand_composites=False): + """Flattens `input_tree` up to `shallow_tree`. + + Any further depth in structure in `input_tree` is retained as structures in + the partially flattened output. + + Returns a list of (path, value) pairs, where value a leaf node in the + flattened tree, and path is the tuple path of that leaf in input_tree. + + If `shallow_tree` and `input_tree` are not sequences, this returns a + single-item list: `[((), input_tree)]`. + + Use Case: + + Sometimes we may wish to partially flatten a nested sequence, retaining some + of the nested structure. We achieve this by specifying a shallow structure, + `shallow_tree`, we wish to flatten up to. + + The input, `input_tree`, can be thought of as having the same structure layout + as `shallow_tree`, but with leaf nodes that are themselves tree structures. 
+ + Examples: + + ```python + input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + shallow_tree = [[True, True], [False, True]] + + flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree, + input_tree) + flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree, + shallow_tree) + + # Output is: + # [((0, 0), [2, 2]), + # ((0, 1), [3, 3]), + # ((1, 0), [4, 9]), + # ((1, 1), [5, 5])] + # + # [((0, 0), True), + # ((0, 1), True), + # ((1, 0), False), + # ((1, 1), True)] + ``` + + ```python + input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] + shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] + + input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) + input_tree_flattened = flatten(input_tree) + + # Output is: + # [((0, 0), ('a', 1)), + # ((0, 1, 0), ('b', 2)), + # ((0, 1, 1, 0), ('c', 3)), + # ((0, 1, 1, 1), ('d', 4))] + # ['a', 1, 'b', 2, 'c', 3, 'd', 4] + ``` + + Non-Sequence Edge Cases: + + ```python + flatten_with_tuple_paths_up_to(0, 0) # Output: [(), 0] + + flatten_with_tuple_paths_up_to(0, [0, 1, 2]) # Output: [(), [0, 1, 2]] + + flatten_with_tuple_paths_up_to([0, 1, 2], 0) # Output: TypeError + + flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2]) + # Output: [((0,) 0), ((1,), 1), ((2,), 2)] + ``` + + Args: + shallow_tree: a possibly pruned structure of input_tree. + input_tree: an atom or a nested structure. + Note, numpy arrays are considered atoms. + check_types: bool. If True, check that each node in shallow_tree has the + same type as the corresponding node in input_tree. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A Python list, the partially flattened version of `input_tree` according to + the structure of `shallow_tree`. + + Raises: + TypeError: If `shallow_tree` is a nested structure but `input_tree` is not. 
+ TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. + """ + is_nested_fn = _is_nested_or_composite if expand_composites else is_nested + assert_shallow_structure(shallow_tree, + input_tree, + check_types=check_types, + expand_composites=expand_composites) + return list( + nest_util.yield_flat_up_to( + nest_util.Modality.CORE, shallow_tree, input_tree, is_nested_fn + ) + ) + + +@tf_export("__internal__.nest.map_structure_up_to", v1=[]) +def map_structure_up_to(shallow_tree, func, *inputs, **kwargs): + """Applies a function or op to a number of partially flattened inputs. + + The `inputs` are flattened up to `shallow_tree` before being mapped. + + Use Case: + + Sometimes we wish to apply a function to a partially flattened + structure (for example when the function itself takes structure inputs). We + achieve this by specifying a shallow structure, `shallow_tree` we wish to + flatten up to. + + The `inputs`, can be thought of as having the same structure layout as + `shallow_tree`, but with leaf nodes that are themselves tree structures. + + This function therefore will return something with the same base structure as + `shallow_tree`. 
+ + Examples: + + ```python + shallow_tree = [None, None] + inp_val = [1, 2, 3] + out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val) + + # Output is: [2, 4] + ``` + + ```python + ab_tuple = collections.namedtuple("ab_tuple", "a, b") + op_tuple = collections.namedtuple("op_tuple", "add, mul") + inp_val = ab_tuple(a=2, b=3) + inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) + out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, + inp_val, inp_ops) + + # Output is: ab_tuple(a=6, b=15) + ``` + + ```python + data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] + name_list = ['evens', ['odds', 'primes']] + out = map_structure_up_to( + name_list, + lambda name, sec: "first_{}_{}".format(len(sec), name), + name_list, data_list) + + # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] + ``` + + Args: + shallow_tree: a shallow structure, common to all the inputs. + func: callable which will be applied to each input individually. + *inputs: structures that are compatible with shallow_tree. The function + `func` is applied to corresponding structures due to partial flattening + of each input, so the function must support arity of `len(inputs)`. + **kwargs: kwargs to feed to func(). Special kwarg + `check_types` is not passed to func, but instead determines whether the + types of iterables within the structures have to be same (e.g. + `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow + this set this argument to `False`. + + Raises: + TypeError: If `shallow_tree` is a nested structure but `input_tree` is not. + TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. + + Returns: + result of repeatedly applying `func`, with the same structure layout as + `shallow_tree`. 
+ """ + return nest_util.map_structure_up_to( + nest_util.Modality.CORE, + shallow_tree, + lambda _, *values: func(*values), # Discards the path arg. + *inputs, + **kwargs, + ) + + +def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs): + """Applies a function or op to a number of partially flattened inputs. + + Like map_structure_up_to(), except that the 'func' argument takes a path + tuple as its first argument, followed by the corresponding values from + *inputs. + + Example: + + ```python + lowercase = {'a': 'a', 'b': ('b0', 'b1')} + uppercase = {'a': 'A', 'b': ('B0', 'B1')} + + def print_path_and_values(path, *values): + print("path: {}, values: {}".format(path, values)) + + shallow_tree = {'a': None} + map_structure_with_tuple_paths_up_to(shallow_tree, + print_path_and_values, + lowercase, + uppercase) + path: ('a',), values: ('a', 'A') + path: ('b', 0), values: ('b0', 'B0') + path: ('b', 1), values: ('b1', 'B1') + + shallow_tree = {'b': None} + map_structure_with_tuple_paths_up_to(shallow_tree, + print_path_and_values, + lowercase, + uppercase, + check_types=False) + path: ('b', 1), values: (('bo', 'b1'), ('B0', 'B1')) + + shallow_tree = {'a': None, 'b': {1: None}} + map_structure_with_tuple_paths_up_to(shallow_tree, + print_path_and_values, + lowercase, + uppercase, + check_types=False) + path: ('a',), values: ('a', 'A') + path: ('b', 1), values: ('b1', B1') + ``` + + Args: + shallow_tree: a shallow structure, common to all the inputs. + func: callable that takes args (path, inputs_0_value, ... , inputs_N_value), + where path is a tuple path to an atom in shallow_tree, and inputs_i_value + is the corresponding value from inputs[i]. + *inputs: structures that are all structurally compatible with shallow_tree. + **kwargs: kwargs to feed to func(). Special kwarg `check_types` is not + passed to func, but instead determines whether the types of iterables + within the structures have to be same (e.g. 
`map_structure(func, [1], + (1,))` raises a `TypeError` exception). To allow this set this argument to + `False`. + + Raises: + TypeError: If `shallow_tree` is a nested structure but one of `*inputs` is + not. + TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. + + Returns: + Result of repeatedly applying `func`. Has the same structure layout as + `shallow_tree`. + """ + return nest_util.map_structure_up_to( + nest_util.Modality.CORE, shallow_tree, func, *inputs, **kwargs + ) + + +@tf_export("__internal__.nest.get_traverse_shallow_structure", v1=[]) +def get_traverse_shallow_structure(traverse_fn, structure, + expand_composites=False): + """Generates a shallow structure from a `traverse_fn` and `structure`. + + `traverse_fn` must accept any possible subtree of `structure` and return + a depth=1 structure containing `True` or `False` values, describing which + of the top-level subtrees may be traversed. It may also + return scalar `True` or `False` "traversal is OK / not OK for all subtrees." + + Examples are available in the unit tests (nest_test.py). + + Args: + traverse_fn: Function taking a substructure and returning either a scalar + `bool` (whether to traverse that substructure or not) or a depth=1 + shallow structure of the same type, describing which parts of the + substructure to traverse. + structure: The structure to traverse. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A shallow structure containing python bools, which can be passed to + `map_structure_up_to` and `flatten_up_to`. + + Raises: + TypeError: if `traverse_fn` returns a nested structure for an atom input. 
+ or a structure with depth higher than 1 for a nested structure input, + or if any leaf values in the returned structure or scalar are not type + `bool`. + """ + is_nested_fn = _is_nested_or_composite if expand_composites else is_nested + to_traverse = traverse_fn(structure) + if not is_nested_fn(structure): + if not isinstance(to_traverse, bool): + raise TypeError("traverse_fn returned structure: %s for non-structure: %s" + % (to_traverse, structure)) + return to_traverse + level_traverse = [] + if isinstance(to_traverse, bool): + if not to_traverse: + # Do not traverse this substructure at all. Exit early. + return False + else: + # Traverse the entire substructure. + for branch in nest_util.yield_value(nest_util.Modality.CORE, structure): + level_traverse.append( + get_traverse_shallow_structure(traverse_fn, branch, + expand_composites=expand_composites)) + elif not is_nested_fn(to_traverse): + raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s" + % (to_traverse, structure)) + else: + # Traverse some subset of this substructure. + assert_shallow_structure(to_traverse, structure, + expand_composites=expand_composites) + for t, branch in zip( + nest_util.yield_value(nest_util.Modality.CORE, to_traverse), + nest_util.yield_value(nest_util.Modality.CORE, structure), + ): + if not isinstance(t, bool): + raise TypeError( + "traverse_fn didn't return a depth=1 structure of bools. saw: %s " + " for structure: %s" % (to_traverse, structure)) + if t: + level_traverse.append( + get_traverse_shallow_structure(traverse_fn, branch)) + else: + level_traverse.append(False) + return nest_util.sequence_like(structure, level_traverse) + + +@tf_export("__internal__.nest.yield_flat_paths", v1=[]) +def yield_flat_paths(nest, expand_composites=False): + """Yields paths for some nested structure. + + Refer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. 
+ + Paths are lists of objects which can be str-converted, which may include + integers or other types which are used as indices in a dict. + + The flat list will be in the corresponding order as if you called + `nest.flatten` on the structure. This is handy for naming Tensors such + the TF scope structure matches the tuple structure. + + E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))` + + ```shell + nest.flatten(value) + [3, 23, 42] + list(nest.yield_flat_paths(value)) + [('a',), ('b', 'c'), ('b', 'd')] + ``` + + ```shell + list(nest.yield_flat_paths({'a': [3]})) + [('a', 0)] + list(nest.yield_flat_paths({'a': 3})) + [('a',)] + ``` + + Args: + nest: the value to produce a flattened paths list for. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Yields: + Tuples containing index or key values which form the path to a specific + leaf value in the nested structure. + """ + is_nested_fn = _is_nested_or_composite if expand_composites else is_nested + for k, _ in nest_util.yield_flat_up_to( + nest_util.Modality.CORE, nest, nest, is_nested_fn + ): + yield k + + +def flatten_with_joined_string_paths(structure, separator="/", + expand_composites=False): + """Returns a list of (string path, atom) tuples. + + The order of tuples produced matches that of `nest.flatten`. This allows you + to flatten a nested structure while keeping information about where in the + structure each atom was located. See `nest.yield_flat_paths` + for more information. + + Args: + structure: the nested structure to flatten. + separator: string to separate levels of hierarchy in the results, defaults + to '/'. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A list of (string, atom) tuples. 
+ """ + flat_paths = yield_flat_paths(structure, expand_composites=expand_composites) + def stringify_and_join(path_elements): + return separator.join(str(path_element) for path_element in path_elements) + + flat_string_paths = (stringify_and_join(path) for path in flat_paths) + return list(zip(flat_string_paths, + flatten(structure, expand_composites=expand_composites))) + + +def flatten_with_tuple_paths(structure, expand_composites=False): + """Returns a list of `(tuple_path, atom)` tuples. + + The order of pairs produced matches that of `nest.flatten`. This allows you + to flatten a nested structure while keeping information about where in the + structure each atom was located. See `nest.yield_flat_paths` + for more information about tuple paths. + + Args: + structure: the nested structure to flatten. + expand_composites: If true, then composite tensors such as + `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their + component tensors. + + Returns: + A list of `(tuple_path, atom)` tuples. Each `tuple_path` is a tuple + of indices and/or dictionary keys that uniquely specify the path to + `atom` within `structure`. + """ + return list(zip(yield_flat_paths(structure, + expand_composites=expand_composites), + flatten(structure, expand_composites=expand_composites))) + + +@tf_export("__internal__.nest.list_to_tuple", v1=[]) +def list_to_tuple(structure): + """Replace all lists with tuples. + + The fork of nest that tf.data uses treats lists as atoms, while + tf.nest treats them as structures to recurse into. Keras has chosen to adopt + the latter convention, and must therefore deeply replace all lists with tuples + before passing structures to Dataset.from_generator. + + Args: + structure: A nested structure to be remapped. + + Returns: + structure mapped to replace all lists with tuples. 
+ """ + def sequence_fn(instance, args): + if isinstance(instance, list): + return tuple(args) + return nest_util.sequence_like(instance, args) + + return nest_util.pack_sequence_as( + nest_util.Modality.CORE, + structure, + flatten(structure), + False, + sequence_fn=sequence_fn, + ) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest_util.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c53042f7dc11abc5b2ced9ca79f2c7ba329f02b3 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/nest_util.py @@ -0,0 +1,1726 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility methods for handling nests. + +This module encapsulates different semantics of handling nests by the public +tf.nest APIs and internal tf.data APIs. The difference in semantics exists for +historic reasons and reconciliation would require a non-backwards compatible +change. + +The implementation of the different semantics use a common utility to +avoid / minimize further divergence between the two APIs over time. 
+""" + +import collections as _collections +import enum + +import wrapt as _wrapt + +from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import +from tensorflow.python.platform import tf_logging +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util.compat import collections_abc as _collections_abc +from tensorflow.python.util.custom_nest_protocol import CustomNestProtocol + + +_is_mapping_view = _pywrap_utils.IsMappingView +_is_attrs = _pywrap_utils.IsAttrs +_is_composite_tensor = _pywrap_utils.IsCompositeTensor +_is_type_spec = _pywrap_utils.IsTypeSpec +_is_mutable_mapping = _pywrap_utils.IsMutableMapping +_is_mapping = _pywrap_utils.IsMapping +_tf_data_is_nested = _pywrap_utils.IsNestedForData +_tf_data_flatten = _pywrap_utils.FlattenForData +_tf_core_is_nested = _pywrap_utils.IsNested +_is_nested_or_composite = _pywrap_utils.IsNestedOrComposite +# See the swig file (util.i) for documentation. +same_namedtuples = _pywrap_utils.SameNamedtuples + + +STRUCTURES_HAVE_MISMATCHING_TYPES = ( + "The two structures don't have the same sequence type. Input structure has " + "type {input_type}, while shallow structure has type {shallow_type}." +) + +STRUCTURES_HAVE_MISMATCHING_LENGTHS = ( + "The two structures don't have the same sequence length. Input " + "structure has length {input_length}, while shallow structure has length " + "{shallow_length}." +) + +INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = ( + "The input_tree has fewer items than the shallow_tree. Input structure " + "has length {input_size}, while shallow structure has length " + "{shallow_size}." +) + +SHALLOW_TREE_HAS_INVALID_KEYS = ( + "The shallow_tree's keys are not a subset of the input_tree's keys. The " + "shallow_tree has the following keys that are not in the input_tree: {}." +) + + +class Modality(enum.Enum): + """Modality/semantic used for treating nested structures. + + - Modality.CORE follows tensorflow_core/tf.nest semantics. 
+ + The following collection types are recognized by `tf.nest` as nested + structures: + + * `collections.abc.Sequence` (except `string` and `bytes`). + This includes `list`, `tuple`, and `namedtuple`. + * `collections.abc.Mapping` (with sortable keys). + This includes `dict` and `collections.OrderedDict`. + * `collections.abc.MappingView` (with sortable keys). + * [`attr.s` classes](https://www.attrs.org/). + + Any other values are considered **atoms**. Not all collection types are + considered nested structures. For example, the following types are + considered atoms: + + * `set`; `{"a", "b"}` is an atom, while `["a", "b"]` is a nested structure. + * [`dataclass` classes](https://docs.python.org/library/dataclasses.html) + * `tf.Tensor` + * `numpy.array` + + - Modality.DATA follows tf.data's nest semantics. + + This modality makes two changes: + 1. It removes support for lists as a level of nesting in nested structures. + 2. It adds support for `SparseTensorValue` as an atomic element. + + The motivation for this change is twofold: + + 1. It seems more natural for lists to be treated (e.g. in Dataset + constructors) + as tensors, rather than lists of (lists of...) tensors. + 2. This is needed because `SparseTensorValue` is implemented as a `namedtuple` + that would normally be flattened and we want to be able to create sparse + tensor from `SparseTensorValue's similarly to creating tensors from numpy + arrays. + """ + + CORE = "CORE" + DATA = "DATA" + + +class _DotString(object): + __slots__ = [] + + def __str__(self): + return "." + + def __repr__(self): + return "." + + +_DOT = _DotString() + + +def is_nested(modality, structure): + """Returns true if its input is a nested structure. + + For Modality.CORE refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a nested structure. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: the value to test. 
+ + Returns: + True if the input is a nested structure. + """ + if modality == Modality.CORE: + return _tf_core_is_nested(structure) + elif modality == Modality.DATA: + return _tf_data_is_nested(structure) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# TODO(b/225045380): Move to a "leaf" library to use in trace_type. +def is_namedtuple(instance, strict=False): + """Returns True iff `instance` is a `namedtuple`. + + Args: + instance: An instance of a Python object. + strict: If True, `instance` is considered to be a `namedtuple` only if it is + a "plain" namedtuple. For instance, a class inheriting from a `namedtuple` + will be considered to be a `namedtuple` iff `strict=False`. + + Returns: + True if `instance` is a `namedtuple`. + """ + return _pywrap_utils.IsNamedtuple(instance, strict) + + +def sequence_like(instance, args): + """Converts the sequence `args` to the same type as `instance`. + + Args: + instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, + `collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or + `type_spec.TypeSpec`. + args: items to be converted to the `instance` type. + + Returns: + `args` with the type of `instance`. + """ + if _is_mutable_mapping(instance): + # Pack dictionaries in a deterministic order by sorting the keys. + # Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). 
+ result = dict(zip(_tf_core_sorted(instance), args)) + instance_type = type(instance) + if instance_type == _collections.defaultdict: + d = _collections.defaultdict(instance.default_factory) + else: + d = instance_type() + for key in instance: + d[key] = result[key] + return d + elif _is_mapping(instance): + result = dict(zip(_tf_core_sorted(instance), args)) + instance_type = type(instance) + if not getattr(instance_type, "__supported_by_tf_nest__", False): + tf_logging.log_first_n( + tf_logging.WARN, + "Mapping types may not work well with tf.nest. " + "Prefer using MutableMapping for {}".format(instance_type), + 1, + ) + try: + return instance_type((key, result[key]) for key in instance) + except TypeError as err: + # pylint: disable=raise-missing-from + raise TypeError( + "Error creating an object of type {} like {}. Note that " + "it must accept a single positional argument " + "representing an iterable of key-value pairs, in " + "addition to self. Cause: {}".format(type(instance), instance, err) + ) + elif _is_mapping_view(instance): + # We can't directly construct mapping views, so we create a list instead + return list(args) + elif is_namedtuple(instance) or _is_attrs(instance): + if isinstance(instance, _wrapt.ObjectProxy): + instance_type = type(instance.__wrapped__) + else: + instance_type = type(instance) + return instance_type(*args) + elif _is_composite_tensor(instance): + assert len(args) == 1 + spec = instance._type_spec # pylint: disable=protected-access + return spec._from_components(args[0]) # pylint: disable=protected-access + elif _is_type_spec(instance): + # Pack a CompositeTensor's components according to a TypeSpec. 
+ assert len(args) == 1 + return instance._from_components(args[0]) # pylint: disable=protected-access + elif isinstance(instance, range): + return sequence_like(list(instance), args) + elif isinstance(instance, _wrapt.ObjectProxy): + # For object proxies, first create the underlying type and then re-wrap it + # in the proxy type. + return type(instance)(sequence_like(instance.__wrapped__, args)) + elif isinstance(instance, CustomNestProtocol): + metadata = instance.__tf_flatten__()[0] + return instance.__tf_unflatten__(metadata, tuple(args)) + else: + # Not a namedtuple + return type(instance)(args) + + +def _get_attrs_items(obj): + """Returns a list of (name, value) pairs from an attrs instance. + + TODO(b/268078256): check if this comment is valid, and if so, ensure it's + handled in the function below. + The list will be sorted by name. + + Args: + obj: an object. + + Returns: + A list of (attr_name, attr_value) pairs, sorted by attr_name. + """ + attrs = getattr(obj.__class__, "__attrs_attrs__") + attr_names = (a.name for a in attrs) + return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names] + + +def _tf_core_sorted(dict_): + """Returns a sorted list of the dict keys, with error if keys not sortable.""" + try: + return sorted(dict_.keys()) + except TypeError: + # pylint: disable=raise-missing-from + raise TypeError("nest only supports dicts with sortable keys.") + + +def _tf_data_sorted(dict_): + """Returns a sorted list of the dict keys, with error if keys not sortable.""" + try: + return sorted(list(dict_)) + except TypeError as e: + # pylint: disable=raise-missing-from + raise TypeError( + f"nest only supports dicts with sortable keys. Error: {e.message}" + ) + + +def yield_value(modality, iterable): + """Yield elements of `iterable` in a deterministic order. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + iterable: an iterable. + + Yields: + The iterable elements in a deterministic order. 
+ """ + if modality == Modality.CORE: + yield from _tf_core_yield_value(iterable) + elif modality == Modality.DATA: + yield from _tf_data_yield_value(iterable) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_value(iterable): + for _, v in _tf_core_yield_sorted_items(iterable): + yield v + + +def yield_sorted_items(modality, iterable): + if modality == Modality.CORE: + return _tf_core_yield_sorted_items(iterable) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_sorted_items(iterable): + """Yield (key, value) pairs for `iterable` in a deterministic order. + + For Sequences, the key will be an int, the array index of a value. + For Mappings, the key will be the dictionary key. + For objects (e.g. namedtuples), the key will be the attribute name. + + In all cases, the keys will be iterated in sorted order. + + Args: + iterable: an iterable. + + Yields: + The iterable's (key, value) pairs, in order of sorted keys. + """ + # Ordered to check common structure types (list, tuple, dict) first. + if isinstance(iterable, list): + for item in enumerate(iterable): + yield item + # namedtuples handled separately to avoid expensive namedtuple check. + elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck + for item in enumerate(iterable): + yield item + elif isinstance(iterable, (dict, _collections_abc.Mapping)): + # Iterate through dictionaries in a deterministic order by sorting the + # keys. Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). 
+ for key in _tf_core_sorted(iterable): + yield key, iterable[key] + elif _is_attrs(iterable): + for item in _get_attrs_items(iterable): + yield item + elif is_namedtuple(iterable): + for field in iterable._fields: + yield field, getattr(iterable, field) + elif _is_composite_tensor(iterable): + type_spec = iterable._type_spec # pylint: disable=protected-access + yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access + elif _is_type_spec(iterable): + # Note: to allow CompositeTensors and their TypeSpecs to have matching + # structures, we need to use the same key string here. + yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access + elif isinstance(iterable, CustomNestProtocol): + flat_component = iterable.__tf_flatten__()[1] + assert isinstance(flat_component, tuple) + yield from enumerate(flat_component) + else: + for item in enumerate(iterable): + yield item + + +def _tf_data_yield_value(iterable): + """Yield elements of `iterable` in a deterministic order. + + Args: + iterable: an iterable. + + Yields: + The iterable elements in a deterministic order. + """ + # pylint: disable=protected-access + if isinstance(iterable, _collections_abc.Mapping): + # Iterate through dictionaries in a deterministic order by sorting the + # keys. Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). + for key in _tf_data_sorted(iterable): + yield iterable[key] + # To avoid circular imports. sparse_tensor + # depends on tensorflow/python/util/nest.py transitively, and if we try to + # import sparse_tensor again, it results in a circular import. Instead, here + # we check the class name instead of using `isinstance`. 
+ elif iterable.__class__.__name__ == "SparseTensorValue": + yield iterable + elif _is_attrs(iterable): + for _, attr in _get_attrs_items(iterable): + yield attr + elif isinstance(iterable, CustomNestProtocol): + flat_component = iterable.__tf_flatten__()[1] + assert isinstance(flat_component, tuple) + yield from flat_component + else: + for value in iterable: + yield value + + +def assert_same_structure( + modality, nest1, nest2, check_types=True, expand_composites=False +): + """Asserts that two structures are nested in the same way. + + For Modality.CORE refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. Note the method does not check the types of + atoms inside the structures. + + Examples: + + * These atom vs. atom comparisons will pass: + + >>> tf.nest.assert_same_structure(1.5, tf.Variable(1, tf.uint32)) + >>> tf.nest.assert_same_structure("abc", np.array([1, 2])) + + * These nested structure vs. nested structure comparisons will pass: + + >>> structure1 = (((1, 2), 3), 4, (5, 6)) + >>> structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) + >>> structure3 = [(("a", "b"), "c"), "d", ["e", "f"]] + >>> tf.nest.assert_same_structure(structure1, structure2) + >>> tf.nest.assert_same_structure(structure1, structure3, check_types=False) + + >>> import collections + >>> tf.nest.assert_same_structure( + ... collections.namedtuple("bar", "a b")(1, 2), + ... collections.namedtuple("foo", "a b")(2, 3), + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple("bar", "a b")(1, 2), + ... { "a": 1, "b": 2 }, + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... { "a": 1, "b": 2, "c": 3 }, + ... { "c": 6, "b": 5, "a": 4 }) + + >>> ragged_tensor1 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_splits=[0, 4, 4, 7, 8, 8]) + >>> ragged_tensor2 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4], + ... 
row_splits=[0, 3]) + >>> tf.nest.assert_same_structure( + ... ragged_tensor1, + ... ragged_tensor2, + ... expand_composites=True) + + * These examples will raise exceptions: + + >>> tf.nest.assert_same_structure([0, 1], np.array([0, 1])) + Traceback (most recent call last): + ... + ValueError: The two structures don't have the same nested structure + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple('bar', 'a b')(1, 2), + ... collections.namedtuple('foo', 'a b')(2, 3)) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + For Modality.DATA, nested structures are treated differently than + Modality.CORE. Please refer to class Modality's documentation above to read up + on these differences. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + nest1: an atom or a nested structure. + nest2: an atom or a nested structure. + check_types: - For Modality.CORE: if `True` (default) types of structures + are checked as well, including the keys of dictionaries. If set to + `False`, for example a list and a tuple of objects will look the same if + they have the same size. Note that namedtuples with identical name and + fields are always considered to have the same shallow structure. Two types + will also be considered the same if they are both list subtypes (which + allows "list" and "_ListWrapper" from trackable dependency tracking to + compare equal). `check_types=True` only checks type of sub-structures. The + types of atoms are not checked. - For Modality.DATA: if `True` (default) + types of sequences should be same as well. For dictionary, "type" of + dictionary is considered to include its keys. In other words, two + dictionaries with different keys are considered to have a different + "type". If set to `False`, two iterables are considered same as long as + they yield the elements that have same structures. 
+ expand_composites: Arg only valid for Modality.CORE. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Raises: + ValueError: If the two structures do not have the same number of atoms or + if the two structures are not nested in the same way. + TypeError: If the two structures differ in the type of sequence in any of + their substructures. Only possible if `check_types` is `True`. + """ + if modality == Modality.CORE: + _tf_core_assert_same_structure(nest1, nest2, check_types, expand_composites) + elif modality == Modality.DATA: + _tf_data_assert_same_structure(nest1, nest2, check_types) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# pylint: disable=missing-function-docstring +def _tf_core_assert_same_structure( + nest1, nest2, check_types=True, expand_composites=False +): + # Convert to bool explicitly as otherwise pybind will not be able# to handle + # type mismatch message correctly. See GitHub issue 42329 for details. + check_types = bool(check_types) + expand_composites = bool(expand_composites) + try: + _pywrap_utils.AssertSameStructure( + nest1, nest2, check_types, expand_composites + ) + except (ValueError, TypeError) as e: + str1 = str(_tf_core_map_structure(lambda _: _DOT, nest1)) + str2 = str(_tf_core_map_structure(lambda _: _DOT, nest2)) + raise type(e)( + "%s\nEntire first structure:\n%s\nEntire second structure:\n%s" + % (str(e), str1, str2) + ) + + +def _tf_data_assert_same_structure(nest1, nest2, check_types=True): + _pywrap_utils.AssertSameStructureForData(nest1, nest2, check_types) + + +def _tf_core_packed_nest_with_indices( + structure, flat, index, is_nested_fn, sequence_fn=None +): + """Helper function for pack_sequence_as. + + Args: + structure: structure to mimic. + flat: Flattened values to output substructure for. + index: Index at which to start reading from flat. 
+ is_nested_fn: Function used to test if a value should be treated as a nested + structure. + sequence_fn: Function used to generate a new strcuture instance. + + Returns: + The tuple (new_index, child), where: + * new_index - the updated index into `flat` having processed `structure`. + * packed - the subset of `flat` corresponding to `structure`, + having started at `index`, and packed into the same nested + format. + + Raises: + ValueError: if `structure` contains more atoms than `flat` + (assuming indexing starts from `index`). + """ + packed = [] + sequence_fn = sequence_fn or sequence_like + for s in _tf_core_yield_value(structure): + if is_nested_fn(s): + new_index, child = _tf_core_packed_nest_with_indices( + s, flat, index, is_nested_fn, sequence_fn + ) + packed.append(sequence_fn(s, child)) + index = new_index + else: + packed.append(flat[index]) + index += 1 + return index, packed + + +def _tf_data_packed_nest_with_indices(structure, flat, index): + """Helper function for pack_nest_as. + + Args: + structure: Substructure (tuple of elements and/or tuples) to mimic + flat: Flattened values to output substructure for. + index: Index at which to start reading from flat. + + Returns: + The tuple (new_index, child), where: + * new_index - the updated index into `flat` having processed `structure`. + * packed - the subset of `flat` corresponding to `structure`, + having started at `index`, and packed into the same nested + format. + + Raises: + ValueError: if `structure` contains more elements than `flat` + (assuming indexing starts from `index`). 
+ """ + packed = [] + for s in _tf_data_yield_value(structure): + if _tf_data_is_nested(s): + new_index, child = _tf_data_packed_nest_with_indices(s, flat, index) + packed.append(sequence_like(s, child)) # pylint: disable=protected-access + index = new_index + else: + packed.append(flat[index]) + index += 1 + return index, packed + + +def flatten(modality, structure, expand_composites=False): + """Flattens a nested structure. + + - For Modality.CORE: refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If the structure is an atom, then returns a single-item list: [structure]. + + This is the inverse of the `nest.pack_sequence_as` method that takes in a + flattened list and re-packs it into the nested structure. + + In the case of dict instances, the sequence consists of the values, sorted by + key to ensure deterministic behavior. This is true also for OrderedDict + instances: their sequence order is ignored, the sorting order of keys is used + instead. The same convention is followed in `nest.pack_sequence_as`. This + correctly repacks dicts and OrderedDicts after they have been flattened, and + also allows flattening an OrderedDict and then repacking it back using a + corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys + cannot be flattened. + + Users must not modify any collections used in nest while this function is + running. + + Examples: + + 1. Python dict (ordered by key): + + >>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" } + >>> tf.nest.flatten(dict) + ['value1', 'value2', 'value3'] + + 2. For a nested python tuple: + + >>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + >>> tf.nest.flatten(tuple) + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + + 3. For a nested dictionary of dictionaries: + + >>> dict = { "key3": {"c": (1.0, 2.0), "a": (3.0)}, + ... "key1": {"m": "val1", "g": "val2"} } + >>> tf.nest.flatten(dict) + ['val2', 'val1', 3.0, 1.0, 2.0] + + 4. 
Numpy array (will not flatten): + + >>> array = np.array([[1, 2], [3, 4]]) + >>> tf.nest.flatten(array) + [array([[1, 2], + [3, 4]])] + + 5. `tf.Tensor` (will not flatten): + + >>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) + >>> tf.nest.flatten(tensor) + [] + + 6. `tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + with `expand_composites=False`, we just return the RaggedTensor as is. + + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=False) + [] + + with `expand_composites=True`, we return the component Tensors that make up + the RaggedTensor representation (the values and row_splits tensors) + + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=True) + [, + ] + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: an atom or a nested structure. Note, numpy arrays are considered + atoms and are not flattened. + expand_composites: Arg valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Returns: + A Python list, the flattened version of the input. + + Raises: + TypeError: The nest is or contains a dict with non-sortable keys. 
+ """ + if modality == Modality.CORE: + return _tf_core_flatten(structure, expand_composites) + elif modality == Modality.DATA: + return _tf_data_flatten(structure) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_flatten(structure, expand_composites=False): + """See comments for flatten() in tensorflow/python/util/nest.py.""" + if structure is None: + return [None] + expand_composites = bool(expand_composites) + return _pywrap_utils.Flatten(structure, expand_composites) + + +def pack_sequence_as( + modality, structure, flat_sequence, expand_composites, sequence_fn=None +): + """Returns a given flattened sequence packed into a given structure. + + - For Modality.CORE: Refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If `structure` is an atom, `flat_sequence` must be a single-item list; + in this case the return value is `flat_sequence[0]`. + + If `structure` is or contains a dict instance, the keys will be sorted to + pack the flat sequence in deterministic order. This is true also for + `OrderedDict` instances: their sequence order is ignored, the sorting order of + keys is used instead. The same convention is followed in `flatten`. + This correctly repacks dicts and `OrderedDict`s after they have been + flattened, and also allows flattening an `OrderedDict` and then repacking it + back using a corresponding plain dict, or vice-versa. + Dictionaries with non-sortable keys cannot be flattened. + + Examples: + + 1. Python dict: + + >>> structure = { "key3": "", "key1": "", "key2": "" } + >>> flat_sequence = ["value1", "value2", "value3"] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': 'value3', 'key1': 'value1', 'key2': 'value2'} + + 2. 
For a nested python tuple: + + >>> structure = (('a','b'), ('c','d','e'), 'f') + >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + + 3. For a nested dictionary of dictionaries: + + >>> structure = { "key3": {"c": ('alpha', 'beta'), "a": ('gamma')}, + ... "key1": {"e": "val1", "d": "val2"} } + >>> flat_sequence = ['val2', 'val1', 3.0, 1.0, 2.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}} + + 4. Numpy array (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [np.array([[1, 2], [3, 4]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [array([[1, 2], + [3, 4]])] + + 5. tf.Tensor (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [tf.constant([[1., 2., 3.], [4., 5., 6.]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [] + + 6. `tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + With `expand_composites=False`, we treat RaggedTensor as a scalar. + + >>> structure = { "foo": tf.ragged.constant([[1, 2], [3]]), + ... "bar": tf.constant([[5]]) } + >>> flat_sequence = [ "one", "two" ] + >>> tf.nest.pack_sequence_as(structure, flat_sequence, + ... expand_composites=False) + {'foo': 'two', 'bar': 'one'} + + With `expand_composites=True`, we expect that the flattened input contains + the tensors making up the ragged tensor i.e. the values and row_splits + tensors. + + >>> structure = { "foo": tf.ragged.constant([[1., 2.], [3.]]), + ... 
"bar": tf.constant([[5.]]) } + >>> tensors = tf.nest.flatten(structure, expand_composites=True) + >>> print(tensors) + [, + , + ] + >>> verified_tensors = [tf.debugging.check_numerics(t, 'invalid tensor: ') + ... if t.dtype==tf.float32 else t + ... for t in tensors] + >>> tf.nest.pack_sequence_as(structure, verified_tensors, + ... expand_composites=True) + {'foo': , + 'bar': } + + - For Modality.DATA: If `structure` is a scalar, `flat_sequence` must be a + single-element list; + in this case the return value is `flat_sequence[0]`. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: - For Modality.CORE: Nested structure, whose structure is given + by nested lists, tuples, and dicts. Note: numpy arrays and strings are + considered scalars. - For Modality.DATA: tuple or list constructed of + scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are + considered scalars. + flat_sequence: flat sequence to pack. + expand_composites: Arg valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + sequence_fn: Arg valid for Modality.CORE only. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If `flat_sequence` and `structure` have different + atom counts. + TypeError: For Modality.CORE only. `structure` is or contains a dict with + non-sortable keys. 
+ """ + if modality == Modality.CORE: + return _tf_core_pack_sequence_as( + structure, flat_sequence, expand_composites, sequence_fn + ) + elif modality == Modality.DATA: + return _tf_data_pack_sequence_as(structure, flat_sequence) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_pack_sequence_as( + structure, flat_sequence, expand_composites, sequence_fn=None +): + """Implements sequence packing, with the option to alter the structure.""" + is_nested_fn = ( + _is_nested_or_composite if expand_composites else _tf_core_is_nested + ) + sequence_fn = sequence_fn or sequence_like + + def truncate(value, length): + value_str = str(value) + return value_str[:length] + (value_str[length:] and "...") + + if not is_nested_fn(flat_sequence): + raise TypeError( + "Attempted to pack value:\n {}\ninto a structure, but found " + "incompatible type `{}` instead.".format( + truncate(flat_sequence, 100), type(flat_sequence) + ) + ) + + if not is_nested_fn(structure): + if len(flat_sequence) != 1: + raise ValueError( + "The target structure is of type `{}`\n {}\nHowever the input " + "is a sequence ({}) of length {}.\n {}\nnest cannot " + "guarantee that it is safe to map one to the other.".format( + type(structure), + truncate(structure, 100), + type(flat_sequence), + len(flat_sequence), + truncate(flat_sequence, 100), + ) + ) + return flat_sequence[0] + + try: + final_index, packed = _tf_core_packed_nest_with_indices( + structure, flat_sequence, 0, is_nested_fn, sequence_fn + ) + if final_index < len(flat_sequence): + raise IndexError + except IndexError: + flat_structure = _tf_core_flatten( + structure, expand_composites=expand_composites + ) + if len(flat_structure) != len(flat_sequence): + # pylint: disable=raise-missing-from + raise ValueError( + "Could not pack sequence. Structure had %d atoms, but " + "flat_sequence had %d items. Structure: %s, flat_sequence: %s." 
+ % (len(flat_structure), len(flat_sequence), structure, flat_sequence) + ) + return sequence_fn(structure, packed) + + +def _tf_data_pack_sequence_as(structure, flat_sequence): + """Returns a given flattened sequence packed into a nest. + + If `structure` is a scalar, `flat_sequence` must be a single-element list; + in this case the return value is `flat_sequence[0]`. + + Args: + structure: tuple or list constructed of scalars and/or other tuples/lists, + or a scalar. Note: numpy arrays are considered scalars. + flat_sequence: flat sequence to pack. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If nest and structure have different element counts. + """ + if not (_tf_data_is_nested(flat_sequence) or isinstance(flat_sequence, list)): + raise TypeError( + "Argument `flat_sequence` must be a sequence. Got " + f"'{type(flat_sequence).__name__}'." + ) + + if not _tf_data_is_nested(structure): + if len(flat_sequence) != 1: + raise ValueError( + "Argument `structure` is a scalar but " + f"`len(flat_sequence)`={len(flat_sequence)} > 1" + ) + return flat_sequence[0] + + flat_structure = _tf_data_flatten(structure) + if len(flat_structure) != len(flat_sequence): + raise ValueError( + "Could not pack sequence. Argument `structure` had " + f"{len(flat_structure)} elements, but argument `flat_sequence` had " + f"{len(flat_sequence)} elements. Received structure: " + f"{structure}, flat_sequence: {flat_sequence}." + ) + + _, packed = _tf_data_packed_nest_with_indices(structure, flat_sequence, 0) + return sequence_like(structure, packed) # pylint: disable=protected-access + + +def map_structure(modality, func, *structure, **kwargs): + """Creates a new structure by applying `func` to each atom in `structure`. + + - For Modality.CORE: Refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. 
+ + Applies `func(x[0], x[1], ...)` where x[i] enumerates all atoms in + `structure[i]`. All items in `structure` must have the same arity, + and the return value will contain results with the same structure layout. + + Examples: + + * A single Python dict: + + >>> a = {"hello": 24, "world": 76} + >>> tf.nest.map_structure(lambda p: p * 2, a) + {'hello': 48, 'world': 152} + + * Multiple Python dictionaries: + + >>> d1 = {"hello": 24, "world": 76} + >>> d2 = {"hello": 36, "world": 14} + >>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2) + {'hello': 60, 'world': 90} + + * A single Python list: + + >>> a = [24, 76, "ab"] + >>> tf.nest.map_structure(lambda p: p * 2, a) + [48, 152, 'abab'] + + * Scalars: + + >>> tf.nest.map_structure(lambda x, y: x + y, 3, 4) + 7 + + * Empty structures: + + >>> tf.nest.map_structure(lambda x: x + 1, ()) + () + + * Check the types of iterables: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + * Type check is set to False: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False) + (((None, None), None), None, (None, None)) + + - For Modality.DATA: Applies `func(x[0], x[1], ...)` where x[i] is an entry in + `structure[i]`. All structures in `structure` must have the same arity, + and the return value will contain the results in the same structure. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + func: A callable that accepts as many arguments as there are structures. + *structure: - For Modality.CORE: atom or nested structure. - For + Modality.DATA: scalar, or tuple or list of constructed scalars and/or + other tuples/lists, or scalars. Note: numpy arrays are considered + scalars. 
+ **kwargs: Valid keyword args are: * `check_types`: - For Modality.CORE: If + set to `True` (default) the types of iterables within the structures have + to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` + exception). To allow this set this argument to `False`. Note that + namedtuples with identical name and fields are always considered to have + the same shallow structure. - For Modality.DATA: only valid keyword + argument is `check_types`. If set to `True` (default) the types of + iterables within the structures have to be same (e.g. `map_structure(func, + [1], (1,))` raises a `TypeError` exception). To allow this set this + argument to `False`. * `expand_composites`: Valid for Modality.CORE only. + If set to `True`, then composite tensors such as `tf.sparse.SparseTensor` + and `tf.RaggedTensor` are expanded into their component tensors. If + `False` (the default), then composite tensors are not expanded. + + Returns: + A new structure with the same arity as `structure[0]`, whose atoms + correspond to `func(x[0], x[1], ...)` where `x[i]` is the atom in the + corresponding location in `structure[i]`. If there are different structure + types and `check_types` is `False` the structure types of the first + structure will be used. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + ValueError: If no structure is provided or if the structures do not match + each other by type. + ValueError: If wrong keyword arguments are provided. 
+ """ + if modality == Modality.CORE: + return _tf_core_map_structure(func, *structure, **kwargs) + elif modality == Modality.DATA: + return _tf_data_map_structure(func, *structure, **kwargs) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# pylint: disable=missing-function-docstring +def _tf_core_map_structure(func, *structure, **kwargs): + if not callable(func): + raise TypeError("func must be callable, got: %s" % func) + + if not structure: + raise ValueError("Must provide at least one structure") + + check_types = kwargs.pop("check_types", True) + expand_composites = kwargs.pop("expand_composites", False) + + if kwargs: + raise ValueError( + "Only valid keyword arguments are `check_types` and " + "`expand_composites`, not: `%s`" + % "`, `".join(kwargs.keys()) + ) + + for other in structure[1:]: + _tf_core_assert_same_structure( + structure[0], + other, + check_types=check_types, + expand_composites=expand_composites, + ) + + flat_structure = (_tf_core_flatten(s, expand_composites) for s in structure) + entries = zip(*flat_structure) + + return _tf_core_pack_sequence_as( + structure[0], + [func(*x) for x in entries], + expand_composites=expand_composites, + ) + + +# pylint: disable=missing-function-docstring +def _tf_data_map_structure(func, *structure, **check_types_dict): + if not callable(func): + raise TypeError(f"Argument `func` must be callable, got: {func}") + + if not structure: + raise ValueError("Must provide at least one structure") + + if check_types_dict: + if "check_types" not in check_types_dict or len(check_types_dict) > 1: + raise ValueError( + "Only valid keyword argument for `check_types_dict` is " + f"'check_types'. Got {check_types_dict}." 
+ ) + check_types = check_types_dict["check_types"] + else: + check_types = True + + for other in structure[1:]: + _tf_data_assert_same_structure(structure[0], other, check_types=check_types) + + flat_structure = (_tf_data_flatten(s) for s in structure) + entries = zip(*flat_structure) + + return _tf_data_pack_sequence_as(structure[0], [func(*x) for x in entries]) + + +def yield_flat_up_to(modality, shallow_tree, input_tree, is_nested_fn, path=()): + """Yields (path, value) pairs of input_tree flattened up to shallow_tree. + + - For Modality.CORE: See comments for _tf_core_yield_flat_up_to() below + - For Modality.DATA: See comments for _tf_data_yield_flat_up_to() below + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + shallow_tree: Nested structure. Traverse no further than its leaf nodes. + input_tree: Nested structure. Return the paths and values from this tree. + Must have the same upper structure as shallow_tree. + is_nested_fn: Arg valid for Modality.CORE only. Function used to test if a + value should be treated as a nested structure. + path: Arg valid for Modality.CORE only. Tuple. Optional argument, only used + when recursing. The path from the root of the original shallow_tree, down + to the root of the shallow_tree arg of this recursive call. + + Yields: + Pairs of (path, value), where path the tuple path of a leaf node in + shallow_tree, and value is the value of the corresponding node in + input_tree. + """ + if modality == Modality.CORE: + yield from _tf_core_yield_flat_up_to( + shallow_tree, input_tree, is_nested_fn, path + ) + elif modality == Modality.DATA: + yield from _tf_data_yield_flat_up_to(shallow_tree, input_tree) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path=()): + """Yields (path, value) pairs of input_tree flattened up to shallow_tree. 
def _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path=()):
    """Yields (path, value) pairs of `input_tree` flattened up to `shallow_tree`.

    Args:
      shallow_tree: Nested structure; traversal stops at its leaves.
      input_tree: Nested structure with the same upper structure as
        `shallow_tree`; its values are yielded.
      is_nested_fn: Predicate deciding whether a value is a nested structure.
      path: Tuple path from the root of the original shallow tree down to the
        root of this recursive call's `shallow_tree`.

    Yields:
      Pairs of (path, value) for each leaf of `shallow_tree`, where `value`
      is the corresponding (possibly still nested) node of `input_tree`.
    """
    if not is_nested_fn(shallow_tree):
        # `shallow_tree` bottoms out here, so the entire `input_tree` subtree
        # is the value at this path.
        yield (path, input_tree)
        return
    # Index the input's children by key so lookups follow the shallow tree's
    # key order rather than the input tree's.
    children = dict(_tf_core_yield_sorted_items(input_tree))
    for key, shallow_child in _tf_core_yield_sorted_items(shallow_tree):
        yield from _tf_core_yield_flat_up_to(
            shallow_child, children[key], is_nested_fn, path=path + (key,)
        )


def _tf_data_yield_flat_up_to(shallow_tree, input_tree):
    """Yields elements `input_tree` partially flattened up to `shallow_tree`."""
    if not _tf_data_is_nested(shallow_tree):
        yield input_tree
        return
    for shallow_child, input_child in zip(
        _tf_data_yield_value(shallow_tree), _tf_data_yield_value(input_tree)
    ):
        yield from _tf_data_yield_flat_up_to(shallow_child, input_child)
def assert_shallow_structure(
    modality,
    shallow_tree,
    input_tree,
    check_types=True,
    expand_composites=False,
):
    """Asserts that `shallow_tree` is a shallow structure of `input_tree`.

    `shallow_tree` is a valid shallow version of `input_tree` when
    `input_tree` could be built from it by replacing each of its leaf nodes
    with a deeper structure. For example, both of these raise:

    ```python
    # Keys differ.
    assert_shallow_structure(modality, {"a": "A", "b": "B"}, {"a": 1, "c": 2})

    # Lengths differ.
    assert_shallow_structure(modality, ["a", "b"], ["c", ["d", "e"], "f"])
    ```

    Args:
      modality: Modality.CORE or Modality.DATA.
      shallow_tree: An arbitrarily nested structure.
      input_tree: An arbitrarily nested structure.
      check_types: If `True` (default), the sequence types of `shallow_tree`
        and `input_tree` must match. Two different namedtuple classes with the
        same name and `_fields` attribute count as the same class.
      expand_composites: CORE only. If true, composite tensors such as
        `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
        component tensors.

    Raises:
      TypeError: If `shallow_tree` is a sequence but `input_tree` is not, or
        their sequence types differ while `check_types` is `True`.
      ValueError: If their sequence lengths differ, or `modality` is unknown.
    """
    if modality == Modality.CORE:
        _tf_core_assert_shallow_structure(
            shallow_tree, input_tree, check_types, expand_composites
        )
        return
    if modality == Modality.DATA:
        _tf_data_assert_shallow_structure(shallow_tree, input_tree, check_types)
        return
    raise ValueError(
        "Unknown modality used {} for nested structure".format(modality)
    )
def _tf_core_assert_shallow_structure(
    shallow_tree, input_tree, check_types=True, expand_composites=False
):
    """Asserts `shallow_tree` is a shallow structure of `input_tree` (CORE).

    Recursively checks that `input_tree` could be produced from `shallow_tree`
    by replacing the latter's leaves with deeper structures: matching
    nestedness, (optionally) matching types, compatible composite-tensor
    TypeSpecs, matching lengths, and (for mappings) no keys present only in
    `shallow_tree`.

    Args:
      shallow_tree: An arbitrarily nested structure.
      input_tree: An arbitrarily nested structure.
      check_types: If True, matching nodes must have the same sequence type
        (namedtuples with the same name and fields are treated as the same).
      expand_composites: If true, composite tensors are treated as nested and
        their TypeSpecs are compared for compatibility.

    Raises:
      TypeError: On nestedness or type mismatches.
      ValueError: On length mismatches, missing keys, or incompatible
        composite-tensor TypeSpecs.
    """
    # Composite tensors only count as "nested" when expand_composites is set.
    is_nested_fn = (
        _is_nested_or_composite if expand_composites else _tf_core_is_nested
    )
    if is_nested_fn(shallow_tree):
        if not is_nested_fn(input_tree):
            raise TypeError(
                "If shallow structure is a sequence, input must also be a sequence. "
                "Input has type: %s."
                % type(input_tree)
            )

        # Unwrap proxies (e.g. trackable wrappers) so the type check compares
        # the underlying type.
        if isinstance(shallow_tree, _wrapt.ObjectProxy):
            shallow_type = type(shallow_tree.__wrapped__)
        else:
            shallow_type = type(shallow_tree)

        if check_types and not isinstance(input_tree, shallow_type):
            # Duck-typing means that nest should be fine with two different
            # namedtuples with identical name and fields.
            shallow_is_namedtuple = is_namedtuple(shallow_tree, False)
            input_is_namedtuple = is_namedtuple(input_tree, False)
            if shallow_is_namedtuple and input_is_namedtuple:
                if not same_namedtuples(shallow_tree, input_tree):
                    raise TypeError(
                        STRUCTURES_HAVE_MISMATCHING_TYPES.format(
                            input_type=type(input_tree), shallow_type=type(shallow_tree)
                        )
                    )

            elif isinstance(shallow_tree, list) and isinstance(input_tree, list):
                # List subclasses are considered the same,
                # e.g. python list vs. _ListWrapper.
                pass

            elif (
                _is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree)
            ) and (_is_composite_tensor(input_tree) or _is_type_spec(input_tree)):
                pass  # Compatibility will be checked below.

            elif not (
                isinstance(shallow_tree, _collections_abc.Mapping)
                and isinstance(input_tree, _collections_abc.Mapping)
            ):
                raise TypeError(
                    STRUCTURES_HAVE_MISMATCHING_TYPES.format(
                        input_type=type(input_tree), shallow_type=type(shallow_tree)
                    )
                )

        if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
            # Either side being a composite tensor requires both sides to be a
            # composite tensor or a TypeSpec, then their specs must be
            # compatible (ignoring tensor names).
            if not (
                (_is_composite_tensor(input_tree) or _is_type_spec(input_tree))
                and (
                    _is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree)
                )
            ):
                raise TypeError(
                    STRUCTURES_HAVE_MISMATCHING_TYPES.format(
                        input_type=type(input_tree), shallow_type=type(shallow_tree)
                    )
                )
            # pylint: disable=protected-access
            type_spec_1 = (
                shallow_tree
                if _is_type_spec(shallow_tree)
                else shallow_tree._type_spec
            )._without_tensor_names()
            type_spec_2 = (
                input_tree if _is_type_spec(input_tree) else input_tree._type_spec
            )._without_tensor_names()
            # TODO(b/246356867): Replace the most_specific_common_supertype below
            # with get_structure.
            if hasattr(type_spec_1, "_get_structure") and hasattr(
                type_spec_2, "_get_structure"
            ):
                result = (
                    type_spec_1._get_structure() == type_spec_2._get_structure() or None
                )
            else:
                result = type_spec_1.most_specific_common_supertype([type_spec_2])
            if result is None:
                raise ValueError(
                    "Incompatible CompositeTensor TypeSpecs: %s vs. %s"
                    % (type_spec_1, type_spec_2)
                )
            # pylint: enable=protected-access

        elif _is_type_spec(shallow_tree):
            if not _is_type_spec(input_tree):
                raise TypeError(
                    "If shallow structure is a TypeSpec, input must also "
                    "be a TypeSpec. Input has type: %s."
                    % type(input_tree)
                )
        else:
            if len(input_tree) != len(shallow_tree):
                raise ValueError(
                    STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
                        input_length=len(input_tree), shallow_length=len(shallow_tree)
                    )
                )
            # NOTE(review): this branch is unreachable — the `!=` check above
            # already raises on ANY length mismatch. Upstream nest.py guards
            # the first check with `check_types and`, which makes this branch
            # meaningful when check_types=False; confirm intended behavior.
            elif len(input_tree) < len(shallow_tree):
                raise ValueError(
                    INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
                        input_size=len(input_tree), shallow_size=len(shallow_tree)
                    )
                )

        if isinstance(shallow_tree, _collections_abc.Mapping):
            # Every key of the shallow mapping must exist in the input mapping.
            absent_keys = set(shallow_tree) - set(input_tree)
            if absent_keys:
                raise ValueError(
                    SHALLOW_TREE_HAS_INVALID_KEYS.format(sorted(absent_keys))
                )

        # Recurse into corresponding children.
        for shallow_branch, input_branch in zip(
            _tf_core_yield_value(shallow_tree),
            _tf_core_yield_value(input_tree),
        ):
            _tf_core_assert_shallow_structure(
                shallow_branch,
                input_branch,
                check_types=check_types,
                expand_composites=expand_composites,
            )
Input " + f"structure has keys {list(input_tree)}, while shallow structure " + f"has keys {list(shallow_tree)}." + ) + input_tree = sorted(input_tree.items()) + shallow_tree = sorted(shallow_tree.items()) + + for shallow_branch, input_branch in zip(shallow_tree, input_tree): + _tf_data_assert_shallow_structure( + shallow_branch, input_branch, check_types=check_types + ) + + +def flatten_up_to( + modality, + shallow_tree, + input_tree, + check_types=True, + expand_composites=False, +): + # pylint: disable=g-doc-return-or-yield,g-doc-args + """Flattens `input_tree` up to `shallow_tree`. + + - For Modality.CORE: refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + Any further depth in structure in `input_tree` is retained as structures in + the partially flatten output. + + If `shallow_tree` and `input_tree` are atoms, this returns a + single-item list: `[input_tree]`. + + Use Case: + + Sometimes we may wish to partially flatten a structure, retaining some + of the nested structure. We achieve this by specifying a shallow structure, + `shallow_tree`, we wish to flatten up to. + + The input, `input_tree`, can be thought of as having the same structure layout + as `shallow_tree`, but with leaf nodes that are themselves tree structures. 
def flatten_up_to(
    modality,
    shallow_tree,
    input_tree,
    check_types=True,
    expand_composites=False,
):
    # pylint: disable=g-doc-return-or-yield,g-doc-args
    """Flattens `input_tree` up to the depth of `shallow_tree`.

    Any structure in `input_tree` deeper than `shallow_tree` is kept intact as
    a leaf of the partially flattened output. If both trees are atoms, the
    result is the single-item list `[input_tree]`.

    Examples:

    ```python
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    flatten_up_to(modality, shallow_tree, input_tree)
    # -> [[2, 2], [3, 3], [4, 9], [5, 5]]
    ```

    Edge cases:

    ```python
    flatten_up_to(modality, 0, 0)                  # [0]
    flatten_up_to(modality, 0, [0, 1, 2])          # [[0, 1, 2]]
    flatten_up_to(modality, [0, 1, 2], 0)          # TypeError
    flatten_up_to(modality, [0, 1, 2], [0, 1, 2])  # [0, 1, 2]
    ```

    Args:
      modality: Modality.CORE or Modality.DATA.
      shallow_tree: A possibly pruned structure of `input_tree`.
      input_tree: An atom or a nested structure; numpy arrays are atoms.
      check_types: If True, matching nodes in both trees must have the same
        type.
      expand_composites: CORE only. If true, composite tensors such as
        `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into
        their component tensors.

    Returns:
      A Python list: `input_tree` partially flattened according to the
      structure of `shallow_tree`.

    Raises:
      TypeError: If `shallow_tree` is nested but `input_tree` is not, or
        their structure types differ.
      ValueError: If their structure lengths differ, or `modality` is unknown.
    """
    if modality == Modality.CORE:
        return _tf_core_flatten_up_to(
            shallow_tree, input_tree, check_types, expand_composites
        )
    if modality == Modality.DATA:
        return _tf_data_flatten_up_to(shallow_tree, input_tree)
    raise ValueError(
        "Unknown modality used {} for nested structure".format(modality)
    )


def _tf_core_flatten_up_to(
    shallow_tree, input_tree, check_types=True, expand_composites=False
):
    """CORE implementation of `flatten_up_to`."""
    nested_pred = (
        _is_nested_or_composite if expand_composites else _tf_core_is_nested
    )
    _tf_core_assert_shallow_structure(
        shallow_tree,
        input_tree,
        check_types=check_types,
        expand_composites=expand_composites,
    )
    # Keep only the values; the paths yielded alongside them are not needed.
    return [
        value
        for _, value in _tf_core_yield_flat_up_to(
            shallow_tree, input_tree, nested_pred
        )
    ]


def _tf_data_flatten_up_to(shallow_tree, input_tree):
    """tf.data implementation of `flatten_up_to` (no composite expansion)."""
    _tf_data_assert_shallow_structure(shallow_tree, input_tree)
    return list(_tf_data_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(modality, shallow_tree, func, *inputs, **kwargs):
    """Applies a function or op to a number of partially flattened inputs.

    The `inputs` are flattened up to `shallow_tree` before being mapped; the
    result has the same base structure as `shallow_tree`.

    Examples:

    ```python
    shallow_tree = [None, None]
    inp_val = [1, 2, 3]
    out = map_structure_up_to(modality, shallow_tree, lambda x: 2 * x, inp_val)
    # Output is: [2, 4]
    ```

    ```python
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    op_tuple = collections.namedtuple("op_tuple", "add, mul")
    inp_val = ab_tuple(a=2, b=3)
    inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
    out = map_structure_up_to(modality, inp_val,
                              lambda val, ops: (val + ops.add) * ops.mul,
                              inp_val, inp_ops)
    # Output is: ab_tuple(a=6, b=15)
    ```

    Args:
      modality: enum value of supported modality [Modality.CORE or
        Modality.DATA]
      shallow_tree: a shallow structure, common to all the inputs.
      func: callable which will be applied to each input individually.
      *inputs: structures that are compatible with shallow_tree. The function
        `func` is applied to corresponding structures due to partial
        flattening of each input, so the function must support arity of
        `len(inputs)`.
      **kwargs: CORE only. kwargs to feed to func(). Special kwarg
        `check_types` is not passed to func, but instead determines whether
        the types of iterables within the structures have to be same; set it
        to `False` to allow e.g. list vs. tuple.

    Returns:
      Result of repeatedly applying `func`, with the same structure layout as
      `shallow_tree`.

    Raises:
      TypeError: If `shallow_tree` is nested but an input is not, or their
        structure types differ.
      ValueError: If structure lengths differ, or `modality` is unknown.
    """
    if modality == Modality.CORE:
        # Bug fix: discard the tuple path before invoking `func`, so `func`
        # receives only the input values, as documented in the examples above.
        # Previously `func` was passed through unwrapped and received the path
        # as an extra first positional argument.
        return _tf_core_map_structure_with_tuple_paths_up_to(
            shallow_tree,
            lambda _, *values: func(*values),  # Discards the path.
            *inputs,
            **kwargs,
        )
    elif modality == Modality.DATA:
        return _tf_data_map_structure_up_to(shallow_tree, func, *inputs)
    else:
        raise ValueError(
            "Unknown modality used {} for nested structure".format(modality)
        )


def _tf_core_map_structure_with_tuple_paths_up_to(
    shallow_tree, func, *inputs, **kwargs
):
    """See comments for map_structure_with_tuple_paths_up_to() in tensorflow/python/util/nest.py."""
    if not inputs:
        raise ValueError("Cannot map over no sequences")

    check_types = kwargs.pop("check_types", True)
    expand_composites = kwargs.pop("expand_composites", False)
    is_nested_fn = (
        _is_nested_or_composite if expand_composites else _tf_core_is_nested
    )

    # Every input must be a valid deepening of `shallow_tree`.
    for input_tree in inputs:
        _tf_core_assert_shallow_structure(
            shallow_tree,
            input_tree,
            check_types=check_types,
            expand_composites=expand_composites,
        )

    # Flatten each input separately, apply the function to corresponding items,
    # then repack based on the structure of the first input.
    flat_value_gen = (
        _tf_core_flatten_up_to(  # pylint: disable=g-complex-comprehension
            shallow_tree,
            input_tree,
            check_types,
            expand_composites=expand_composites,
        )
        for input_tree in inputs
    )
    flat_path_gen = (
        path
        for path, _ in _tf_core_yield_flat_up_to(
            shallow_tree, inputs[0], is_nested_fn
        )
    )
    # `func` receives (path, value_from_input_0, value_from_input_1, ...) plus
    # any remaining kwargs.
    results = [
        func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
    ]
    return _tf_core_pack_sequence_as(
        structure=shallow_tree,
        flat_sequence=results,
        expand_composites=expand_composites,
    )
def _tf_data_map_structure_up_to(shallow_tree, func, *inputs):
    """tf.data implementation of `map_structure_up_to` (no kwargs support)."""
    if not inputs:
        raise ValueError(
            "Argument `inputs` is empty. Cannot map over no sequences."
        )
    for tree in inputs:
        _tf_data_assert_shallow_structure(shallow_tree, tree)

    # Flatten each input separately, apply the function to corresponding
    # elements, then repack based on the structure of the first input.
    flattened = [_tf_data_flatten_up_to(shallow_tree, tree) for tree in inputs]
    results = [func(*grouped) for grouped in zip(*flattened)]
    return _tf_data_pack_sequence_as(
        structure=shallow_tree, flat_sequence=results
    )
def np_array(values, dtype=None, copy=True, order='K'):
    """Creates a NumPy array containing the input values (always copies).

    NumPy 2.x casts strictly and raises when values overflow the requested
    dtype. To keep the permissive NumPy 1.x behavior, numeric dtypes are
    applied in two steps — build first, then `astype` — so overflow wraps
    instead of raising.

    Args:
      values: Array-like objects, e.g. a python list, tuple, or an object
        whose __array__ method returns an array.
      dtype: The desired numpy data type for the array.
      copy: Bool. If True (default), the array data is copied. If None, a
        copy is made only when required. For False a ValueError is raised if
        a copy cannot be avoided. Copies are shallow for object dtype.
      order: {'K', 'A', 'C', 'F'} memory layout.

    Returns:
      A NumPy array with the specified data type.
    """
    if dtype is None or not np.issubdtype(dtype, np.number):
        return np.array(values, dtype=dtype, copy=copy, order=order)
    # Numeric dtype: cast after construction so overflow wraps (NumPy 1.x
    # semantics) rather than raising under NumPy 2.x.
    return np.array(values, copy=copy, order=order).astype(dtype)
def np_asarray(values, dtype=None, order=None, copy=None):
    """Converts input values to a NumPy array without forcing a copy.

    On NumPy 2.x, numeric dtypes are applied via `astype` after conversion so
    values that overflow the target dtype wrap (NumPy 1.x behavior) instead
    of raising. On NumPy 1.x the `copy` argument is ignored because
    `np.asarray` does not accept it there.

    Args:
      values: Array-like objects, e.g. a python list, tuple, or an object
        whose __array__ method returns an array.
      dtype: The desired numpy data type for the array.
      order: {'C', 'F', 'A', 'K'} memory layout.
      copy: Bool or None. If True the object is copied; if None only when
        needed; if False a ValueError is raised when a copy cannot be avoided.

    Returns:
      A NumPy array with the specified data type.
    """
    on_numpy_2 = np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0'
    if not on_numpy_2:
        return np.asarray(values, dtype=dtype, order=order)
    if dtype is not None and np.issubdtype(dtype, np.number):
        # Two-step conversion keeps permissive overflow semantics.
        return np.asarray(values, order=order, copy=copy).astype(dtype)
    return np.asarray(values, dtype=dtype, order=order, copy=copy)


def np_where(condition, x=None, y=None):
    """Returns elements chosen from `x` or `y` depending on `condition`.

    With only `condition` given, behaves like `np.asarray(condition).nonzero()`.
    NumPy 2.1.0rc0 disallows 0-D inputs to `nonzero`, so `np.atleast_1d` is
    applied there to keep NumPy 1.x compatibility
    (https://github.com/numpy/numpy/pull/26268).

    Args:
      condition: Array-like, bool. Where True, yield `x`, otherwise yield `y`.
      x: Array-like values to choose from; broadcastable with `condition`.
      y: Array-like values to choose from; broadcastable with `condition`.

    Returns:
      An array picking from `x`/`y` per `condition`, or (when `x` and `y` are
      omitted) the indices of the non-zero elements.
    """
    if x is not None or y is not None:
        return np.where(condition, x, y)
    if np.lib.NumpyVersion(np.__version__) >= '2.1.0.rc0':
        return np.atleast_1d(np.asarray(condition)).nonzero()
    return np.where(condition)
def np_reshape(a, /, shape=None, *, newshape=None, order='C', copy=None):
    """Reshapes an array without changing its data.

    NumPy 2.1.0rc1 added `shape` and `copy` arguments to `numpy.reshape`
    (https://github.com/numpy/numpy/pull/26292). Both `newshape` and `shape`
    keywords are supported here, but `newshape` is being deprecated — prefer
    `shape`. Since `shape` may no longer be None in new NumPy, the old
    `newshape=None` behavior (returning the array) is emulated via `asarray`.

    Args:
      a: Array-like object to be reshaped.
      shape: The new shape of the array.
      newshape: The new shape of the array (deprecated alias).
      order: {'C', 'F', 'K'} read/write order.
      copy: Bool or None. If True the data is copied; if None only when the
        `order` requires it; if False a ValueError is raised when a copy
        cannot be avoided.

    Returns:
      A view when possible; otherwise a copy.
    """
    target = newshape if shape is None else shape
    if np.lib.NumpyVersion(np.__version__) < '2.1.0.rc0':
        return np.reshape(a, target, order=order)
    if target is None:
        # Neither `shape` nor `newshape` given: mirror the legacy behavior of
        # `newshape=None` by returning (a view/copy of) the array itself.
        return np.asarray(a, order=order, copy=copy)
    return np.reshape(a, target, order=order, copy=copy)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from typing import Any, Set +import weakref + +from tensorflow.python.util.compat import collections_abc + + +# LINT.IfChange +class _ObjectIdentityWrapper: + """Wraps an object, mapping __eq__ on wrapper to "is" on wrapped. + + Since __eq__ is based on object identity, it's safe to also define __hash__ + based on object ids. This lets us add unhashable types like trackable + _ListWrapper objects to object-identity collections. + """ + + __slots__ = ["_wrapped", "__weakref__"] + + def __init__(self, wrapped): + self._wrapped = wrapped + + @property + def unwrapped(self): + return self._wrapped + + def _assert_type(self, other): + if not isinstance(other, _ObjectIdentityWrapper): + raise TypeError("Cannot compare wrapped object with unwrapped object") + + def __lt__(self, other): + self._assert_type(other) + return id(self._wrapped) < id(other._wrapped) # pylint: disable=protected-access + + def __gt__(self, other): + self._assert_type(other) + return id(self._wrapped) > id(other._wrapped) # pylint: disable=protected-access + + def __eq__(self, other): + if other is None: + return False + self._assert_type(other) + return self._wrapped is other._wrapped # pylint: disable=protected-access + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + # Wrapper id() is also fine for weakrefs. In fact, we rely on + # id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is + # weakref.ref(a) in _WeakObjectIdentityWrapper. 
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
    """Identity wrapper that holds its target through a weak reference."""

    __slots__ = ()

    def __init__(self, wrapped):
        # Hashing stays stable because CPython returns the same weakref.ref
        # object for repeated calls on the same target (without a callback).
        super().__init__(weakref.ref(wrapped))

    @property
    def unwrapped(self):
        # Dereference the weakref; None once the target has been collected.
        return self._wrapped()


class Reference(_ObjectIdentityWrapper):
    """A hashable reference that compares by identity of the referent.

    ```python
    x = [1]
    y = [1]

    x_ref1 = Reference(x)
    x_ref2 = Reference(x)
    y_ref = Reference(y)

    print(x_ref1 == x_ref2)
    ==> True

    print(x_ref1 == y_ref)
    ==> False
    ```
    """

    __slots__ = ()

    # Hide the parent's `unwrapped` accessor; use `deref()` instead.
    unwrapped = property()

    def deref(self):
        """Returns the referenced object.

        ```python
        x_ref = Reference(x)
        print(x is x_ref.deref())
        ==> True
        ```
        """
        return self._wrapped
+ """ + + __slots__ = ["_storage"] + + def __init__(self): + self._storage = {} + + def _wrap_key(self, key): + return _ObjectIdentityWrapper(key) + + def __getitem__(self, key): + return self._storage[self._wrap_key(key)] + + def __setitem__(self, key, value): + self._storage[self._wrap_key(key)] = value + + def __delitem__(self, key): + del self._storage[self._wrap_key(key)] + + def __len__(self): + return len(self._storage) + + def __iter__(self): + for key in self._storage: + yield key.unwrapped + + def __repr__(self): + return "ObjectIdentityDictionary(%s)" % repr(self._storage) + + +class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary): + """Like weakref.WeakKeyDictionary, but compares objects with "is".""" + + __slots__ = ["__weakref__"] + + def _wrap_key(self, key): + return _WeakObjectIdentityWrapper(key) + + def __len__(self): + # Iterate, discarding old weak refs + return len(list(self._storage)) + + def __iter__(self): + keys = self._storage.keys() + for key in keys: + unwrapped = key.unwrapped + if unwrapped is None: + del self[key] + else: + yield unwrapped + + +class ObjectIdentitySet(collections_abc.MutableSet): + """Like the built-in set, but compares objects with "is".""" + + __slots__ = ["_storage", "__weakref__"] + + def __init__(self, *args): + self._storage = set(self._wrap_key(obj) for obj in list(*args)) + + def __le__(self, other: Set[Any]) -> bool: + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for item in self._storage: + if item not in other: + return False + return True + + def __ge__(self, other: Set[Any]) -> bool: + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for item in other: + if item not in self: + return False + return True + + @staticmethod + def _from_storage(storage): + result = ObjectIdentitySet() + result._storage = storage # pylint: disable=protected-access + return result + + def _wrap_key(self, key): 
class ObjectIdentitySet(collections_abc.MutableSet):
    """Like the built-in set, but compares objects with `is`."""

    __slots__ = ["_storage", "__weakref__"]

    def __init__(self, *args):
        # Elements are stored wrapped so hashing/equality use identity.
        self._storage = set(self._wrap_key(obj) for obj in list(*args))

    def __le__(self, other: Set[Any]) -> bool:
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        # NOTE(review): this tests the stored *wrappers* against `other`,
        # which only behaves as a subset test when `other` holds the same
        # wrappers (e.g. another set of wrapped items) — confirm callers.
        for item in self._storage:
            if item not in other:
                return False
        return True

    def __ge__(self, other: Set[Any]) -> bool:
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for item in other:
            if item not in self:
                return False
        return True

    @staticmethod
    def _from_storage(storage):
        # Alternate constructor used by set operations that already have a
        # wrapped storage set.
        result = ObjectIdentitySet()
        result._storage = storage  # pylint: disable=protected-access
        return result

    def _wrap_key(self, key):
        return _ObjectIdentityWrapper(key)

    def __contains__(self, key):
        return self._wrap_key(key) in self._storage

    def discard(self, key):
        self._storage.discard(self._wrap_key(key))

    def add(self, key):
        self._storage.add(self._wrap_key(key))

    def update(self, items):
        self._storage.update([self._wrap_key(item) for item in items])

    def clear(self):
        self._storage.clear()

    def intersection(self, items):
        # NOTE(review): unlike `difference`, this returns the raw set of
        # wrappers rather than an ObjectIdentitySet; kept as-is because
        # callers may rely on it, but the asymmetry looks unintentional.
        return self._storage.intersection(
            [self._wrap_key(item) for item in items]
        )

    def difference(self, items):
        return ObjectIdentitySet._from_storage(
            self._storage.difference([self._wrap_key(item) for item in items])
        )

    def __len__(self):
        return len(self._storage)

    def __iter__(self):
        keys = list(self._storage)
        for key in keys:
            yield key.unwrapped


class ObjectIdentityWeakSet(ObjectIdentitySet):
    """Like weakref.WeakSet, but compares objects with `is`."""

    __slots__ = ()

    def _wrap_key(self, key):
        return _WeakObjectIdentityWrapper(key)

    def __len__(self):
        # Iterating prunes entries whose referents were garbage collected.
        return len([_ for _ in self])

    def __iter__(self):
        for key in list(self._storage):
            unwrapped = key.unwrapped
            if unwrapped is None:
                # Bug fix: discard the stored wrapper directly. The previous
                # `self.discard(key)` re-wrapped the wrapper, producing a key
                # that is never present, so dead references were never pruned.
                self._storage.discard(key)
            else:
                yield unwrapped
# LINT.ThenChange(//tensorflow/python/keras/utils/object_identity.py)
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def get_cpu_kernel_names() -> list[str]: ... +def get_gpu_kernel_names() -> list[str]: ... diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/pywrap_xla_ops.so b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/pywrap_xla_ops.so new file mode 100644 index 0000000000000000000000000000000000000000..8b09607d5ca87592d29bf1926c45d8e886bcdfc5 Binary files /dev/null and b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/pywrap_xla_ops.so differ diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/serialization.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..5b7bf0dde7888d5d39262c1544b7c1760f839da8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/serialization.py @@ -0,0 +1,78 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for serializing Python objects.""" + +import numpy as np +import wrapt + +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import tensor_shape +from tensorflow.python.util.compat import collections_abc + + +def get_json_type(obj): + """Serializes any object to a JSON-serializable structure. + + Args: + obj: the object to serialize + + Returns: + JSON-serializable structure representing `obj`. + + Raises: + TypeError: if `obj` cannot be serialized. + """ + # if obj is a serializable Keras class instance + # e.g. optimizer, layer + if hasattr(obj, 'get_config'): + return {'class_name': obj.__class__.__name__, 'config': obj.get_config()} + + # if obj is any numpy type + if type(obj).__module__ == np.__name__: + if isinstance(obj, np.ndarray): + return obj.tolist() + else: + return obj.item() + + # misc functions (e.g. loss function) + if callable(obj): + return obj.__name__ + + # if obj is a python 'type' + if type(obj).__name__ == type.__name__: + return obj.__name__ + + if isinstance(obj, tensor_shape.Dimension): + return obj.value + + if isinstance(obj, tensor_shape.TensorShape): + return obj.as_list() + + if isinstance(obj, dtypes.DType): + return obj.name + + if isinstance(obj, collections_abc.Mapping): + return dict(obj) + + if obj is Ellipsis: + return {'class_name': '__ellipsis__'} + + if isinstance(obj, wrapt.ObjectProxy): + return obj.__wrapped__ + + raise TypeError(f'Object {obj} is not JSON-serializable. 
You may implement ' + 'a `get_config()` method on the class ' + '(returning a JSON-serializable dictionary) to make it ' + 'serializable.') diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_contextlib.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_contextlib.py new file mode 100644 index 0000000000000000000000000000000000000000..52f2c3d1c3e3fc94b00ebdb7e1a029b2d70bf92a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_contextlib.py @@ -0,0 +1,39 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TFDecorator-aware replacements for the contextlib module.""" +from collections.abc import Callable, Iterator +import contextlib as _contextlib + +from typing import ContextManager, TypeVar + +from tensorflow.python.util import tf_decorator + +_T = TypeVar('_T') + + +def contextmanager( + target: Callable[..., Iterator[_T]], +) -> Callable[..., ContextManager[_T]]: + """A tf_decorator-aware wrapper for `contextlib.contextmanager`. + + Usage is identical to `contextlib.contextmanager`. + + Args: + target: A callable to be wrapped in a contextmanager. + Returns: + A callable that can be used inside of a `with` statement. 
+ """ + context_manager = _contextlib.contextmanager(target) + return tf_decorator.make_decorator(target, context_manager, 'contextmanager') diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..906bf87c61b210e7a997418efbd5045f3d14e27a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator.py @@ -0,0 +1,361 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base TFDecorator class and utility functions for working with decorators. + +There are two ways to create decorators that TensorFlow can introspect into. +This is important for documentation generation purposes, so that function +signatures aren't obscured by the (*args, **kwds) signature that decorators +often provide. + +1. Call `tf_decorator.make_decorator` on your wrapper function. If your +decorator is stateless, or can capture all of the variables it needs to work +with through lexical closure, this is the simplest option. Create your wrapper +function as usual, but instead of returning it, return +`tf_decorator.make_decorator(target, your_wrapper)`. 
This will attach some +decorator introspection metadata onto your wrapper and return it. + +Example: + + def print_hello_before_calling(target): + def wrapper(*args, **kwargs): + print('hello') + return target(*args, **kwargs) + return tf_decorator.make_decorator(target, wrapper) + +2. Derive from TFDecorator. If your decorator needs to be stateful, you can +implement it in terms of a TFDecorator. Store whatever state you need in your +derived class, and implement the `__call__` method to do your work before +calling into your target. You can retrieve the target via +`super(MyDecoratorClass, self).decorated_target`, and call it with whatever +parameters it needs. + +Example: + + class CallCounter(tf_decorator.TFDecorator): + def __init__(self, target): + super(CallCounter, self).__init__('count_calls', target) + self.call_count = 0 + + def __call__(self, *args, **kwargs): + self.call_count += 1 + return super(CallCounter, self).decorated_target(*args, **kwargs) + + def count_calls(target): + return CallCounter(target) +""" +import inspect +from typing import Dict, Any + + +def _make_default_values(fullargspec: inspect.FullArgSpec) -> Dict[str, Any]: + """Returns default values from the function's fullargspec.""" + if fullargspec.defaults is not None: + defaults = { + name: value for name, value in zip( + fullargspec.args[-len(fullargspec.defaults):], fullargspec.defaults) + } + else: + defaults = {} + + if fullargspec.kwonlydefaults is not None: + defaults.update(fullargspec.kwonlydefaults) + + return defaults + + +def fullargspec_to_signature( + fullargspec: inspect.FullArgSpec) -> inspect.Signature: + """Repackages fullargspec information into an equivalent inspect.Signature.""" + defaults = _make_default_values(fullargspec) + parameters = [] + + for arg in fullargspec.args: + parameters.append( + inspect.Parameter( + arg, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + default=defaults.get(arg, inspect.Parameter.empty), + ) + ) + + if fullargspec.varargs is not 
None: + parameters.append( + inspect.Parameter(fullargspec.varargs, inspect.Parameter.VAR_POSITIONAL) + ) + + for kwarg in fullargspec.kwonlyargs: + parameters.append( + inspect.Parameter( + kwarg, + inspect.Parameter.KEYWORD_ONLY, + default=defaults.get(kwarg, inspect.Parameter.empty), + ) + ) + + if fullargspec.varkw is not None: + parameters.append( + inspect.Parameter(fullargspec.varkw, inspect.Parameter.VAR_KEYWORD) + ) + + return inspect.Signature(parameters) + + +def make_decorator(target, + decorator_func, + decorator_name=None, + decorator_doc='', + decorator_argspec=None): + """Make a decorator from a wrapper and a target. + + Args: + target: The final callable to be wrapped. + decorator_func: The wrapper function. + decorator_name: The name of the decorator. If `None`, the name of the + function calling make_decorator. + decorator_doc: Documentation specific to this application of + `decorator_func` to `target`. + decorator_argspec: Override the signature using FullArgSpec. + + Returns: + The `decorator_func` argument with new metadata attached. + """ + if decorator_name is None: + decorator_name = inspect.currentframe().f_back.f_code.co_name + decorator = TFDecorator(decorator_name, target, decorator_doc, + decorator_argspec) + setattr(decorator_func, '_tf_decorator', decorator) + # Objects that are callables (e.g., a functools.partial object) may not have + # the following attributes. + if hasattr(target, '__name__'): + decorator_func.__name__ = target.__name__ + if hasattr(target, '__qualname__'): + decorator_func.__qualname__ = target.__qualname__ + if hasattr(target, '__module__'): + decorator_func.__module__ = target.__module__ + if hasattr(target, '__dict__'): + # Copy dict entries from target which are not overridden by decorator_func. 
+ for name in target.__dict__: + if name not in decorator_func.__dict__: + decorator_func.__dict__[name] = target.__dict__[name] + if hasattr(target, '__doc__'): + decorator_func.__doc__ = decorator.__doc__ + decorator_func.__wrapped__ = target + # Keeping a second handle to `target` allows callers to detect whether the + # decorator was modified using `rewrap`. + decorator_func.__original_wrapped__ = target + if decorator_argspec: + decorator_func.__signature__ = fullargspec_to_signature( + decorator_argspec) + elif callable(target): + try: + signature = inspect.signature(target) + except (TypeError, ValueError): + # Certain callables such as builtins can not be inspected for signature. + pass + else: + bound_instance = _get_bound_instance(target) + # Present the decorated func as a method as well + if bound_instance and 'self' in signature.parameters: + signature = inspect.Signature(list(signature.parameters.values())[1:]) + decorator_func.__self__ = bound_instance + + decorator_func.__signature__ = signature + + return decorator_func + + +def _get_bound_instance(target): + """Returns the instance any of the targets is attached to.""" + decorators, target = unwrap(target) + for decorator in decorators: + if inspect.ismethod(decorator.decorated_target): + return decorator.decorated_target.__self__ + + +def _has_tf_decorator_attr(obj): + """Checks if object has _tf_decorator attribute. + + This check would work for mocked object as well since it would + check if returned attribute has the right type. + + Args: + obj: Python object. + """ + return (hasattr(obj, '_tf_decorator') and + isinstance(getattr(obj, '_tf_decorator'), TFDecorator)) + + +def rewrap(decorator_func, previous_target, new_target): + """Injects a new target into a function built by make_decorator. + + This function allows replacing a function wrapped by `decorator_func`, + assuming the decorator that wraps the function is written as described below. 
+ + The decorator function must use `.__wrapped__` instead of the + wrapped function that is normally used: + + Example: + + # Instead of this: + def simple_parametrized_wrapper(*args, **kwds): + return wrapped_fn(*args, **kwds) + + tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) + + # Write this: + def simple_parametrized_wrapper(*args, **kwds): + return simple_parametrized_wrapper.__wrapped__(*args, **kwds) + + tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) + + Note that this process modifies decorator_func. + + Args: + decorator_func: Callable returned by `wrap`. + previous_target: Callable that needs to be replaced. + new_target: Callable to replace previous_target with. + + Returns: + The updated decorator. If decorator_func is not a tf_decorator, new_target + is returned. + """ + # Because the process mutates the decorator, we only need to alter the + # innermost function that wraps previous_target. + cur = decorator_func + innermost_decorator = None + target = None + while _has_tf_decorator_attr(cur): + innermost_decorator = cur + target = getattr(cur, '_tf_decorator') + if target.decorated_target is previous_target: + break + cur = target.decorated_target + assert cur is not None + + # If decorator_func is not a decorator, new_target replaces it directly. + if innermost_decorator is None: + # Consistency check. The caller should always pass the result of + # tf_decorator.unwrap as previous_target. If decorator_func is not a + # decorator, that will have returned decorator_func itself. + assert decorator_func is previous_target + return new_target + + target.decorated_target = new_target + + if inspect.ismethod(innermost_decorator): + # Bound methods can't be assigned attributes. Thankfully, they seem to + # be just proxies for their unbound counterpart, and we can modify that. 
+ if hasattr(innermost_decorator, '__func__'): + innermost_decorator.__func__.__wrapped__ = new_target + elif hasattr(innermost_decorator, 'im_func'): + innermost_decorator.im_func.__wrapped__ = new_target + else: + innermost_decorator.__wrapped__ = new_target + else: + innermost_decorator.__wrapped__ = new_target + + return decorator_func + + +def unwrap(maybe_tf_decorator): + """Unwraps an object into a list of TFDecorators and a final target. + + Args: + maybe_tf_decorator: Any callable object. + + Returns: + A tuple whose first element is an list of TFDecorator-derived objects that + were applied to the final callable target, and whose second element is the + final undecorated callable target. If the `maybe_tf_decorator` parameter is + not decorated by any TFDecorators, the first tuple element will be an empty + list. The `TFDecorator` list is ordered from outermost to innermost + decorators. + """ + decorators = [] + cur = maybe_tf_decorator + while True: + if isinstance(cur, TFDecorator): + decorators.append(cur) + elif _has_tf_decorator_attr(cur): + decorators.append(getattr(cur, '_tf_decorator')) + else: + break + if not hasattr(decorators[-1], 'decorated_target'): + break + cur = decorators[-1].decorated_target + return decorators, cur + + +class TFDecorator(object): + """Base class for all TensorFlow decorators. + + TFDecorator captures and exposes the wrapped target, and provides details + about the current decorator. 
+ """ + + def __init__(self, + decorator_name, + target, + decorator_doc='', + decorator_argspec=None): + self._decorated_target = target + self._decorator_name = decorator_name + self._decorator_doc = decorator_doc + self._decorator_argspec = decorator_argspec + if hasattr(target, '__name__'): + self.__name__ = target.__name__ + if hasattr(target, '__qualname__'): + self.__qualname__ = target.__qualname__ + if self._decorator_doc: + self.__doc__ = self._decorator_doc + elif hasattr(target, '__doc__') and target.__doc__: + self.__doc__ = target.__doc__ + else: + self.__doc__ = '' + + if decorator_argspec: + self.__signature__ = fullargspec_to_signature(decorator_argspec) + elif callable(target): + try: + self.__signature__ = inspect.signature(target) + except (TypeError, ValueError): + # Certain callables such as builtins can not be inspected for signature. + pass + + def __get__(self, instance, owner): + return self._decorated_target.__get__(instance, owner) + + def __call__(self, *args, **kwargs): + return self._decorated_target(*args, **kwargs) + + @property + def decorated_target(self): + return self._decorated_target + + @decorated_target.setter + def decorated_target(self, decorated_target): + self._decorated_target = decorated_target + + @property + def decorator_name(self): + return self._decorator_name + + @property + def decorator_doc(self): + return self._decorator_doc + + @property + def decorator_argspec(self): + return self._decorator_argspec diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py new file mode 100644 index 0000000000000000000000000000000000000000..58115c7290abc9c5983594a160fab5bac1fc32c1 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow 
Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Exports functions from tf_decorator.py to avoid cycles.""" + +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_export + + +make_decorator = tf_export.tf_export( + '__internal__.decorator.make_decorator', v1=[] +)(tf_decorator.make_decorator) +unwrap = tf_export.tf_export('__internal__.decorator.unwrap', v1=[])( + tf_decorator.unwrap +) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_export.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_export.py new file mode 100644 index 0000000000000000000000000000000000000000..1408eb264a4b0616849861d75c97327ef126525a --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_export.py @@ -0,0 +1,398 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for exporting TensorFlow symbols to the API. + +Exporting a function or a class: + +To export a function or a class use tf_export decorator. For e.g.: +```python +@tf_export('foo', 'bar.foo') +def foo(...): + ... +``` + +If a function is assigned to a variable, you can export it by calling +tf_export explicitly. For e.g.: +```python +foo = get_foo(...) +tf_export('foo', 'bar.foo')(foo) +``` + + +Exporting a constant +```python +foo = 1 +tf_export('consts.foo').export_constant(__name__, 'foo') +``` +""" +from collections.abc import Sequence +import functools +import sys +from typing import Any, NamedTuple, Optional, Protocol, TypeVar + +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect + +KERAS_API_NAME = 'keras' +TENSORFLOW_API_NAME = 'tensorflow' + +# List of subpackage names used by TensorFlow components. Have to check that +# TensorFlow core repo does not export any symbols under these names. +SUBPACKAGE_NAMESPACES = [] + + +class _Attributes(NamedTuple): + names: str + constants: str + + +# Attribute values must be unique to each API. 
+API_ATTRS = { + TENSORFLOW_API_NAME: _Attributes('_tf_api_names', '_tf_api_constants'), + KERAS_API_NAME: _Attributes('_keras_api_names', '_keras_api_constants'), +} + +API_ATTRS_V1 = { + TENSORFLOW_API_NAME: _Attributes( + '_tf_api_names_v1', '_tf_api_constants_v1' + ), + KERAS_API_NAME: _Attributes( + '_keras_api_names_v1', '_keras_api_constants_v1' + ), +} + + +class InvalidSymbolNameError(Exception): + """Raised when trying to export symbol as an invalid or unallowed name.""" + + +_NAME_TO_SYMBOL_MAPPING: dict[str, Any] = dict() + + +def get_symbol_from_name(name: str) -> Optional[Any]: + return _NAME_TO_SYMBOL_MAPPING.get(name) + + +def get_canonical_name_for_symbol( + symbol: Any, + api_name: str = TENSORFLOW_API_NAME, + add_prefix_to_v1_names: bool = False, +) -> Optional[str]: + """Get canonical name for the API symbol. + + Example: + ```python + from tensorflow.python.util import tf_export + cls = tf_export.get_symbol_from_name('keras.optimizers.Adam') + + # Gives `` + print(cls) + + # Gives `keras.optimizers.Adam` + print(tf_export.get_canonical_name_for_symbol(cls, api_name='keras')) + ``` + + Args: + symbol: API function or class. + api_name: API name. Currently, only `tensorflow`. + add_prefix_to_v1_names: Specifies whether a name available only in V1 should + be prefixed with compat.v1. + + Returns: + Canonical name for the API symbol (for e.g. initializers.zeros) if + canonical name could be determined. Otherwise, returns None. 
+ """ + if not hasattr(symbol, '__dict__'): + return None + api_names_attr = API_ATTRS[api_name].names + _, undecorated_symbol = tf_decorator.unwrap(symbol) + if api_names_attr not in undecorated_symbol.__dict__: + return None + api_names = getattr(undecorated_symbol, api_names_attr) + deprecated_api_names = undecorated_symbol.__dict__.get( + '_tf_deprecated_api_names', [] + ) + + canonical_name = get_canonical_name(api_names, deprecated_api_names) + if canonical_name: + return canonical_name + + # If there is no V2 canonical name, get V1 canonical name. + api_names_attr = API_ATTRS_V1[api_name].names + api_names = getattr(undecorated_symbol, api_names_attr) + v1_canonical_name = get_canonical_name(api_names, deprecated_api_names) + if add_prefix_to_v1_names: + return 'compat.v1.%s' % v1_canonical_name + return v1_canonical_name + + +def get_canonical_name( + api_names: Sequence[str], deprecated_api_names: Sequence[str] +) -> Optional[str]: + """Get preferred endpoint name. + + Args: + api_names: API names iterable. + deprecated_api_names: Deprecated API names iterable. + + Returns: + Returns one of the following in decreasing preference: + - first non-deprecated endpoint + - first endpoint + - None + """ + non_deprecated_name = next( + (name for name in api_names if name not in deprecated_api_names), None + ) + if non_deprecated_name: + return non_deprecated_name + if api_names: + return api_names[0] + return None + + +def get_v1_names(symbol: Any) -> Sequence[str]: + """Get a list of TF 1.* names for this symbol. + + Args: + symbol: symbol to get API names for. + + Returns: + List of all API names for this symbol. 
+ """ + names_v1 = [] + tensorflow_api_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].names + keras_api_attr_v1 = API_ATTRS_V1[KERAS_API_NAME].names + + if not hasattr(symbol, '__dict__'): + return names_v1 + if tensorflow_api_attr_v1 in symbol.__dict__: + names_v1.extend(getattr(symbol, tensorflow_api_attr_v1)) + if keras_api_attr_v1 in symbol.__dict__: + names_v1.extend(getattr(symbol, keras_api_attr_v1)) + return names_v1 + + +def get_v2_names(symbol: Any) -> Sequence[str]: + """Get a list of TF 2.0 names for this symbol. + + Args: + symbol: symbol to get API names for. + + Returns: + List of all API names for this symbol. + """ + names_v2 = [] + tensorflow_api_attr = API_ATTRS[TENSORFLOW_API_NAME].names + keras_api_attr = API_ATTRS[KERAS_API_NAME].names + + if not hasattr(symbol, '__dict__'): + return names_v2 + if tensorflow_api_attr in symbol.__dict__: + names_v2.extend(getattr(symbol, tensorflow_api_attr)) + if keras_api_attr in symbol.__dict__: + names_v2.extend(getattr(symbol, keras_api_attr)) + return names_v2 + + +def get_v1_constants(module: Any) -> Sequence[str]: + """Get a list of TF 1.* constants in this module. + + Args: + module: TensorFlow module. + + Returns: + List of all API constants under the given module. + """ + constants_v1 = [] + tensorflow_constants_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].constants + + if hasattr(module, tensorflow_constants_attr_v1): + constants_v1.extend(getattr(module, tensorflow_constants_attr_v1)) + return constants_v1 + + +def get_v2_constants(module: Any) -> Sequence[str]: + """Get a list of TF 2.0 constants in this module. + + Args: + module: TensorFlow module. + + Returns: + List of all API constants under the given module. 
+ """ + constants_v2 = [] + tensorflow_constants_attr = API_ATTRS[TENSORFLOW_API_NAME].constants + + if hasattr(module, tensorflow_constants_attr): + constants_v2.extend(getattr(module, tensorflow_constants_attr)) + return constants_v2 + + +T = TypeVar('T') + + +class api_export(object): # pylint: disable=invalid-name + """Provides ways to export symbols to the TensorFlow API.""" + + _names: Sequence[str] + _names_v1: Sequence[str] + _api_name: str + + def __init__( + self, + *args: str, + api_name: str = TENSORFLOW_API_NAME, + v1: Optional[Sequence[str]] = None, + allow_multiple_exports: bool = True, # pylint: disable=unused-argument + ): + """Export under the names *args (first one is considered canonical). + + Args: + *args: API names in dot delimited format. + api_name: API you want to generate Currently, only `tensorflow`. + v1: Names for the TensorFlow V1 API. If not set, we will use V2 API names + both for TensorFlow V1 and V2 APIs. + allow_multiple_exports: Deprecated. + """ + self._names = args + self._names_v1 = v1 if v1 is not None else args + self._api_name = api_name + + self._validate_symbol_names() + + def _validate_symbol_names(self) -> None: + """Validate you are exporting symbols under an allowed package. + + We need to ensure things exported by tf_export, etc. + export symbols under disjoint top-level package names. + + For TensorFlow, we check that it does not export anything under subpackage + names used by components (keras, etc.). + + For each component, we check that it exports everything under its own + subpackage. + + Raises: + InvalidSymbolNameError: If you try to export symbol under disallowed name. 
+ """ + all_symbol_names = set(self._names) | set(self._names_v1) + if self._api_name == TENSORFLOW_API_NAME: + for subpackage in SUBPACKAGE_NAMESPACES: + if any(n.startswith(subpackage) for n in all_symbol_names): + raise InvalidSymbolNameError( + '@tf_export is not allowed to export symbols under %s.*' + % (subpackage) + ) + else: + if not all(n.startswith(self._api_name) for n in all_symbol_names): + raise InvalidSymbolNameError( + 'Can only export symbols under package name of component.' + ) + + def __call__(self, func: T) -> T: + """Calls this decorator. + + Args: + func: decorated symbol (function or class). + + Returns: + The input function with _tf_api_names attribute set. + """ + api_names_attr = API_ATTRS[self._api_name].names + api_names_attr_v1 = API_ATTRS_V1[self._api_name].names + + _, undecorated_func = tf_decorator.unwrap(func) + self.set_attr(undecorated_func, api_names_attr, self._names) + self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1) + + for name in self._names: + _NAME_TO_SYMBOL_MAPPING[name] = func + for name_v1 in self._names_v1: + _NAME_TO_SYMBOL_MAPPING['compat.v1.%s' % name_v1] = func + + return func + + def set_attr( + self, func: Any, api_names_attr: str, names: Sequence[str] + ) -> None: + setattr(func, api_names_attr, names) + + def export_constant(self, module_name: str, name: str) -> None: + """Store export information for constants/string literals. + + Export information is stored in the module where constants/string literals + are defined. + + e.g. + ```python + foo = 1 + bar = 2 + tf_export("consts.foo").export_constant(__name__, 'foo') + tf_export("consts.bar").export_constant(__name__, 'bar') + ``` + + Args: + module_name: (string) Name of the module to store constant at. + name: (string) Current constant name. 
+ """ + module = sys.modules[module_name] + api_constants_attr = API_ATTRS[self._api_name].constants + api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants + + if not hasattr(module, api_constants_attr): + setattr(module, api_constants_attr, []) + # pylint: disable=protected-access + getattr(module, api_constants_attr).append((self._names, name)) + + if not hasattr(module, api_constants_attr_v1): + setattr(module, api_constants_attr_v1, []) + getattr(module, api_constants_attr_v1).append((self._names_v1, name)) + + +def kwarg_only(f: Any) -> Any: + """A wrapper that throws away all non-kwarg arguments.""" + f_argspec = tf_inspect.getfullargspec(f) + + def wrapper(*args, **kwargs): + if args: + raise TypeError( + '{f} only takes keyword args (possible keys: {kwargs}). ' + 'Please pass these args as kwargs instead.'.format( + f=f.__name__, kwargs=f_argspec.args + ) + ) + return f(**kwargs) + + return tf_decorator.make_decorator(f, wrapper, decorator_argspec=f_argspec) + + +class ExportType(Protocol): + + def __call__( + self, + *v2: str, + v1: Optional[Sequence[str]] = None, + allow_multiple_exports: bool = True, # Deprecated, no-op + ) -> api_export: + ... + + +tf_export: ExportType = functools.partial( + api_export, api_name=TENSORFLOW_API_NAME +) +keras_export: ExportType = functools.partial( + api_export, api_name=KERAS_API_NAME +) diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_inspect.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..a716f354ad415fdae8932991d8c318334b7526a8 --- /dev/null +++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_inspect.py @@ -0,0 +1,470 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""TFDecorator-aware replacements for the inspect module.""" +import collections +import functools +import inspect as _inspect + +from tensorflow.python.util import tf_decorator + + +# inspect.signature() is preferred over inspect.getfullargspec() in PY3. +# Note that while it can handle TFDecorators, it will ignore a TFDecorator's +# provided ArgSpec/FullArgSpec and instead return the signature of the +# inner-most function. 
+def signature(obj, *, follow_wrapped=True):
+  """TFDecorator-aware replacement for inspect.signature."""
+  # Unwrap any chain of TFDecorators first; signature is taken from the
+  # inner-most (undecorated) target, ignoring decorator-provided argspecs.
+  return _inspect.signature(
+      tf_decorator.unwrap(obj)[1], follow_wrapped=follow_wrapped)
+
+
+Parameter = _inspect.Parameter
+Signature = _inspect.Signature
+
+# `inspect.ArgSpec` was removed in Python 3.11; fall back to an equivalent
+# namedtuple so the `ArgSpec` name stays available on newer interpreters.
+if hasattr(_inspect, 'ArgSpec'):
+  ArgSpec = _inspect.ArgSpec
+else:
+  ArgSpec = collections.namedtuple(
+      'ArgSpec',
+      [
+          'args',
+          'varargs',
+          'keywords',
+          'defaults',
+      ],
+  )
+
+
+if hasattr(_inspect, 'FullArgSpec'):
+  FullArgSpec = _inspect.FullArgSpec  # pylint: disable=invalid-name
+else:
+  FullArgSpec = collections.namedtuple('FullArgSpec', [
+      'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
+      'annotations'
+  ])
+
+
+def _convert_maybe_argspec_to_fullargspec(argspec):
+  # Widen a legacy ArgSpec into a FullArgSpec; a FullArgSpec passes through
+  # unchanged.
+  if isinstance(argspec, FullArgSpec):
+    return argspec
+  return FullArgSpec(
+      args=argspec.args,
+      varargs=argspec.varargs,
+      varkw=argspec.keywords,
+      defaults=argspec.defaults,
+      kwonlyargs=[],
+      kwonlydefaults=None,
+      annotations={})
+
+if hasattr(_inspect, 'getfullargspec'):
+  _getfullargspec = _inspect.getfullargspec  # pylint: disable=invalid-name
+
+  def _getargspec(target):
+    """A python3 version of getargspec.
+
+    Calls `getfullargspec` and assigns args, varargs,
+    varkw, and defaults to a python 2/3 compatible `ArgSpec`.
+
+    The parameter name 'varkw' is changed to 'keywords' to fit the
+    `ArgSpec` struct.
+
+    Args:
+      target: the target object to inspect.
+
+    Returns:
+      An ArgSpec with args, varargs, keywords, and defaults parameters
+      from FullArgSpec.
+    """
+    fullargspecs = getfullargspec(target)
+
+    # Keyword-only defaults are folded into the positional defaults tuple so
+    # the result still fits the 4-field ArgSpec shape.
+    defaults = fullargspecs.defaults or ()
+    if fullargspecs.kwonlydefaults:
+      defaults += tuple(fullargspecs.kwonlydefaults.values())
+
+    if not defaults:
+      defaults = None
+
+    argspecs = ArgSpec(
+        args=fullargspecs.args + fullargspecs.kwonlyargs,
+        varargs=fullargspecs.varargs,
+        keywords=fullargspecs.varkw,
+        defaults=defaults,
+    )
+    return argspecs
+else:
+  _getargspec = _inspect.getargspec
+
+  def _getfullargspec(target):
+    """A python2 version of getfullargspec.
+
+    Args:
+      target: the target object to inspect.
+
+    Returns:
+      A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
+    """
+    return _convert_maybe_argspec_to_fullargspec(getargspec(target))
+
+
+def currentframe():
+  """TFDecorator-aware replacement for inspect.currentframe."""
+  # stack()[1][0] is the caller's frame (index 0 would be this function).
+  return _inspect.stack()[1][0]
+
+
+def getargspec(obj):
+  """TFDecorator-aware replacement for `inspect.getargspec`.
+
+  Note: `getfullargspec` is recommended as the python 2/3 compatible
+  replacement for this function.
+
+  Args:
+    obj: A function, partial function, or callable object, possibly decorated.
+
+  Returns:
+    The `ArgSpec` that describes the signature of the outermost decorator that
+    changes the callable's signature, or the `ArgSpec` that describes
+    the object if not decorated.
+
+  Raises:
+    ValueError: When callable's signature can not be expressed with
+      ArgSpec.
+    TypeError: For objects of unsupported types.
+  """
+  if isinstance(obj, functools.partial):
+    return _get_argspec_for_partial(obj)
+
+  decorators, target = tf_decorator.unwrap(obj)
+
+  # The outermost decorator that declares an argspec wins.
+  spec = next((d.decorator_argspec
+               for d in decorators
+               if d.decorator_argspec is not None), None)
+  if spec:
+    return spec
+
+  try:
+    # Python3 will handle most callables here (not partial).
+    return _getargspec(target)
+  except TypeError:
+    pass
+
+  # Fallback order for classes: __init__, then __new__, then the metaclass
+  # __call__ below.
+  if isinstance(target, type):
+    try:
+      return _getargspec(target.__init__)
+    except TypeError:
+      pass
+
+    try:
+      return _getargspec(target.__new__)
+    except TypeError:
+      pass
+
+  # The `type(target)` ensures that if a class is received we don't return
+  # the signature of its __call__ method.
+  return _getargspec(type(target).__call__)
+
+
+def _get_argspec_for_partial(obj):
+  """Implements `getargspec` for `functools.partial` objects.
+
+  Args:
+    obj: The `functools.partial` object
+  Returns:
+    An `inspect.ArgSpec`
+  Raises:
+    ValueError: When callable's signature can not be expressed with
+      ArgSpec.
+  """
+  # When callable is a functools.partial object, we construct its ArgSpec with
+  # following strategy:
+  # - If callable partial contains default value for positional arguments (ie.
+  # object.args), then final ArgSpec doesn't contain those positional arguments.
+  # - If callable partial contains default value for keyword arguments (ie.
+  # object.keywords), then we merge them with wrapped target. Default values
+  # from callable partial takes precedence over those from wrapped target.
+  #
+  # However, there is a case where it is impossible to construct a valid
+  # ArgSpec. Python requires arguments that have no default values must be
+  # defined before those with default values. ArgSpec structure is only valid
+  # when this presumption holds true because default values are expressed as a
+  # tuple of values without keywords and they are always assumed to belong to
+  # last K arguments where K is number of default values present.
+  #
+  # Since functools.partial can give default value to any argument, this
+  # presumption may no longer hold in some cases. For example:
+  #
+  #   def func(m, n):
+  #     return 2 * m + n
+  #   partialed = functools.partial(func, m=1)
+  #
+  # This example will result in m having a default value but n doesn't. This is
+  # usually not allowed in Python and can not be expressed in ArgSpec correctly.
+  #
+  # Thus, we must detect cases like this by finding first argument with default
+  # value and ensures all following arguments also have default values. When
+  # this is not true, a ValueError is raised.
+
+  n_prune_args = len(obj.args)
+  partial_keywords = obj.keywords or {}
+
+  args, varargs, keywords, defaults = getargspec(obj.func)
+
+  # Pruning first n_prune_args arguments.
+  args = args[n_prune_args:]
+
+  # Partial function may give default value to any argument, therefore length
+  # of default value list must be len(args) to allow each argument to
+  # potentially be given a default value.
+  no_default = object()
+  all_defaults = [no_default] * len(args)
+
+  if defaults:
+    all_defaults[-len(defaults):] = defaults
+
+  # Fill in default values provided by partial function in all_defaults.
+  for kw, default in iter(partial_keywords.items()):
+    if kw in args:
+      idx = args.index(kw)
+      all_defaults[idx] = default
+    elif not keywords:
+      raise ValueError(f'{obj} does not have a **kwargs parameter, but '
+                       f'contains an unknown partial keyword {kw}.')
+
+  # Find first argument with default value set.
+  first_default = next(
+      (idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
+
+  # If no default values are found, return ArgSpec with defaults=None.
+  if first_default is None:
+    return ArgSpec(args, varargs, keywords, None)
+
+  # Checks if all arguments have default value set after first one.
+  invalid_default_values = [
+      args[i] for i, j in enumerate(all_defaults)
+      if j is no_default and i > first_default
+  ]
+
+  if invalid_default_values:
+    raise ValueError(f'{obj} has some keyword-only arguments, which are not'
+                     f' supported: {invalid_default_values}.')
+
+  return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
+
+
+def getfullargspec(obj):
+  """TFDecorator-aware replacement for `inspect.getfullargspec`.
+
+  This wrapper emulates `inspect.getfullargspec` in Python 2.
+
+  Args:
+    obj: A callable, possibly decorated.
+
+  Returns:
+    The `FullArgSpec` that describes the signature of
+    the outermost decorator that changes the callable's signature. If the
+    callable is not decorated, `inspect.getfullargspec()` will be called
+    directly on the callable.
+  """
+  decorators, target = tf_decorator.unwrap(obj)
+
+  for d in decorators:
+    if d.decorator_argspec is not None:
+      return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
+  return _getfullargspec(target)
+
+
+def getcallargs(*func_and_positional, **named):
+  """TFDecorator-aware replacement for inspect.getcallargs.
+
+  Args:
+    *func_and_positional: A callable, possibly decorated, followed by any
+      positional arguments that would be passed to `func`.
+    **named: The named argument dictionary that would be passed to `func`.
+
+  Returns:
+    A dictionary mapping `func`'s named arguments to the values they would
+    receive if `func(*positional, **named)` were called.
+
+  `getcallargs` will use the argspec from the outermost decorator that provides
+  it. If no attached decorators modify argspec, the final unwrapped target's
+  argspec will be used.
+  """
+  func = func_and_positional[0]
+  positional = func_and_positional[1:]
+  argspec = getfullargspec(func)
+  call_args = named.copy()
+  # Bound methods receive their instance as the implicit first positional.
+  # 'im_self' is the historical Python 2 name for '__self__'.
+  this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)
+  if ismethod(func) and this:
+    positional = (this,) + positional
+  remaining_positionals = [arg for arg in argspec.args if arg not in call_args]
+  call_args.update(dict(zip(remaining_positionals, positional)))
+  # Fill in defaults for any argument not supplied explicitly.
+  default_count = 0 if not argspec.defaults else len(argspec.defaults)
+  if default_count:
+    for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
+      if arg not in call_args:
+        call_args[arg] = value
+  if argspec.kwonlydefaults is not None:
+    for k, v in argspec.kwonlydefaults.items():
+      if k not in call_args:
+        call_args[k] = v
+  return call_args
+
+
+def getframeinfo(*args, **kwargs):
+  return _inspect.getframeinfo(*args, **kwargs)
+
+
+def getdoc(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getdoc.
+
+  Args:
+    object: An object, possibly decorated.
+
+  Returns:
+    The docstring associated with the object.
+
+  The outermost-decorated object is intended to have the most complete
+  documentation, so the decorated parameter is not unwrapped.
+  """
+  return _inspect.getdoc(object)
+
+
+def getfile(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getfile."""
+  unwrapped_object = tf_decorator.unwrap(object)[1]
+
+  # Work around for the case when object is a stack frame
+  # and only .pyc files are used. In this case, getfile
+  # might return incorrect path. So, we get the path from f_globals
+  # instead.
+  if (hasattr(unwrapped_object, 'f_globals') and
+      '__file__' in unwrapped_object.f_globals):
+    return unwrapped_object.f_globals['__file__']
+  return _inspect.getfile(unwrapped_object)
+
+
+def getmembers(object, predicate=None):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getmembers."""
+  return _inspect.getmembers(object, predicate)
+
+
+def getmodule(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getmodule."""
+  return _inspect.getmodule(object)
+
+
+def getmro(cls):
+  """TFDecorator-aware replacement for inspect.getmro."""
+  return _inspect.getmro(cls)
+
+
+def getsource(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getsource."""
+  return _inspect.getsource(tf_decorator.unwrap(object)[1])
+
+
+def getsourcefile(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getsourcefile."""
+  return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])
+
+
+def getsourcelines(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.getsourcelines."""
+  return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
+
+
+def isbuiltin(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isbuiltin."""
+  return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])
+
+
+def isclass(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isclass."""
+  return _inspect.isclass(tf_decorator.unwrap(object)[1])
+
+
+def isfunction(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isfunction."""
+  return _inspect.isfunction(tf_decorator.unwrap(object)[1])
+
+
+def isframe(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isframe."""
+  return _inspect.isframe(tf_decorator.unwrap(object)[1])
+
+
+def isgenerator(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isgenerator."""
+  return _inspect.isgenerator(tf_decorator.unwrap(object)[1])
+
+
+def isgeneratorfunction(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isgeneratorfunction."""
+  return _inspect.isgeneratorfunction(tf_decorator.unwrap(object)[1])
+
+
+def ismethod(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.ismethod."""
+  return _inspect.ismethod(tf_decorator.unwrap(object)[1])
+
+
+def isanytargetmethod(object):  # pylint: disable=redefined-builtin
+  # pylint: disable=g-doc-args,g-doc-return-or-yield
+  """Checks if `object` or a TF Decorator wrapped target contains self or cls.
+
+  This function could be used along with `tf_inspect.getfullargspec` to
+  determine if the first argument of `object` argspec is self or cls. If the
+  first argument is self or cls, it needs to be excluded from argspec when we
+  compare the argspec to the input arguments and, if provided, the tf.function
+  input_signature.
+
+  Like `tf_inspect.getfullargspec` and python `inspect.getfullargspec`, it
+  does not unwrap python decorators.
+
+  Args:
+    object: A method, function, or functools.partial, possibly decorated by
+      TFDecorator.
+
+  Returns:
+    A bool indicates if `object` or any target along the chain of TF decorators
+    is a method.
+  """
+  decorators, target = tf_decorator.unwrap(object)
+  for decorator in decorators:
+    if _inspect.ismethod(decorator.decorated_target):
+      return True
+
+  # TODO(b/194845243): Implement the long term solution with inspect.signature.
+  # A functools.partial object is not a function or method. But if the wrapped
+  # func is a method, the argspec will contain self/cls.
+  while isinstance(target, functools.partial):
+    target = target.func
+
+  # `target` is a method or an instance with __call__
+  return callable(target) and not _inspect.isfunction(target)
+
+
+def ismodule(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.ismodule."""
+  return _inspect.ismodule(tf_decorator.unwrap(object)[1])
+
+
+def isroutine(object):  # pylint: disable=redefined-builtin
+  """TFDecorator-aware replacement for inspect.isroutine."""
+  return _inspect.isroutine(tf_decorator.unwrap(object)[1])
+
+
+def stack(context=1):
+  """TFDecorator-aware replacement for inspect.stack."""
+  # Drop this function's own frame from the result.
+  return _inspect.stack(context)[1:]
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f45edd6874ab092799bb10bd1d683d2497ba316
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py
@@ -0,0 +1,311 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Decorator that provides a warning if the wrapped object is never used."""
+import copy
+import sys
+import textwrap
+import traceback
+import types
+
+from tensorflow.python.eager import context
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import tf_logging
+from tensorflow.python.util import tf_decorator
+
+
+class _TFShouldUseHelper(object):
+  """Object stored in TFShouldUse-wrapped objects.
+
+  When it is deleted it will emit a warning or error if its `sate` method
+  has not been called by time of deletion, and Tensorflow is not executing
+  eagerly or inside a tf.function (which use autodeps and resolve the
+  main issues this wrapper warns about).
+  """
+
+  def __init__(self, type_, repr_, stack_frame, error_in_function,
+               warn_in_eager):
+    # type_/repr_ are captured eagerly so the message can be produced even
+    # after the wrapped value itself has been garbage collected.
+    self._type = type_
+    self._repr = repr_
+    self._stack_frame = stack_frame
+    self._error_in_function = error_in_function
+    if context.executing_eagerly():
+      # If warn_in_eager, sated == False. Otherwise true.
+      self._sated = not warn_in_eager
+    elif ops.inside_function():
+      if error_in_function:
+        self._sated = False
+        # Check at function-trace exit rather than at __del__ time, so the
+        # error surfaces deterministically during tracing.
+        ops.add_exit_callback_to_default_func_graph(
+            lambda: self._check_sated(raise_error=True))
+      else:
+        self._sated = True
+    else:
+      # TF1 graph building mode
+      self._sated = False
+
+  def sate(self):
+    # Mark as used and drop captured state so large repr/frame objects are
+    # released promptly.
+    self._sated = True
+    self._type = None
+    self._repr = None
+    self._stack_frame = None
+    # NOTE(review): `_logging_module` is set here but never read or assigned
+    # anywhere else in this module — presumably a leftover; confirm upstream.
+    self._logging_module = None
+
+  def _check_sated(self, raise_error):
+    """Check if the object has been sated."""
+    if self._sated:
+      return
+    creation_stack = ''.join(
+        [line.rstrip()
+         for line in traceback.format_stack(self._stack_frame, limit=5)])
+    if raise_error:
+      try:
+        raise RuntimeError(
+            'Object was never used (type {}): {}. If you want to mark it as '
+            'used call its "mark_used()" method. It was originally created '
+            'here:\n{}'.format(self._type, self._repr, creation_stack))
+      finally:
+        # Sate even on the error path so the message is emitted at most once.
+        self.sate()
+    else:
+      tf_logging.error(
+          '==================================\n'
+          'Object was never used (type {}):\n{}\nIf you want to mark it as '
+          'used call its "mark_used()" method.\nIt was originally created '
+          'here:\n{}\n'
+          '=================================='
+          .format(self._type, self._repr, creation_stack))
+
+  def __del__(self):
+    # Only warn (never raise) from a destructor.
+    self._check_sated(raise_error=False)
+
+
+def _new__init__(self, wrapped_value, tf_should_use_helper):
+  # Replacement __init__ installed on generated wrapper classes.
+  # pylint: disable=protected-access
+  self._tf_should_use_helper = tf_should_use_helper
+  self._tf_should_use_wrapped_value = wrapped_value
+
+
+def _new__setattr__(self, key, value):
+  # Writes to the two bookkeeping slots go on the wrapper itself; everything
+  # else is forwarded to the wrapped value.
+  if key in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'):
+    return object.__setattr__(self, key, value)
+  return setattr(
+      object.__getattribute__(self, '_tf_should_use_wrapped_value'),
+      key, value)
+
+
+def _new__getattribute__(self, key):
+  # Any attribute access other than the bookkeeping slots counts as "use".
+  if key not in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'):
+    object.__getattribute__(self, '_tf_should_use_helper').sate()
+  if key in (
+      '_tf_should_use_wrapped_value',
+      '_tf_should_use_helper',
+      'mark_used',
+      '__setattr__',
+  ):
+    return object.__getattribute__(self, key)
+  # Everything else is delegated to the wrapped value.
+  return getattr(
+      object.__getattribute__(self, '_tf_should_use_wrapped_value'), key)
+
+
+def _new_mark_used(self, *args, **kwargs):
+  object.__getattribute__(self, '_tf_should_use_helper').sate()
+  try:
+    # If the wrapped value has its own mark_used, chain to it.
+    mu = object.__getattribute__(
+        object.__getattribute__(self, '_tf_should_use_wrapped_value'),
+        'mark_used')
+    return mu(*args, **kwargs)
+  except AttributeError:
+    pass
+
+# Operators forwarded from the generated wrapper class to the wrapped value's
+# type, so arithmetic/comparison on the wrapper behaves like the original.
+OVERLOADABLE_OPERATORS = {
+    '__add__',
+    '__radd__',
+    '__sub__',
+    '__rsub__',
+    '__mul__',
+    '__rmul__',
+    '__div__',
+    '__rdiv__',
+    '__truediv__',
+    '__rtruediv__',
+    '__floordiv__',
+    '__rfloordiv__',
+    '__mod__',
+    '__rmod__',
+    '__lt__',
+    '__le__',
+    '__gt__',
+    '__ge__',
+    '__ne__',
+    '__eq__',
+    '__and__',
+    '__rand__',
+    '__or__',
+    '__ror__',
+    '__xor__',
+    '__rxor__',
+    '__getitem__',
+    '__pow__',
+    '__rpow__',
+    '__invert__',
+    '__neg__',
+    '__abs__',
+    '__matmul__',
+    '__rmatmul__',
+}
+
+
+# Cache of generated wrapper classes, keyed by the wrapped value's type.
+_WRAPPERS = {}
+
+
+class ShouldUseWrapper(object):
+  pass
+
+
+def _get_wrapper(x, tf_should_use_helper):
+  """Create a wrapper for object x, whose class subclasses type(x).
+
+  The wrapper will emit a warning if it is deleted without any of its
+  properties being accessed or methods being called.
+
+  Args:
+    x: The instance to wrap.
+    tf_should_use_helper: The object that tracks usage.
+
+  Returns:
+    An object wrapping `x`, of type `type(x)`.
+  """
+  type_x = type(x)
+  memoized = _WRAPPERS.get(type_x, None)
+  if memoized:
+    return memoized(x, tf_should_use_helper)
+
+  # Make a copy of `object`
+  tx = copy.deepcopy(ShouldUseWrapper)
+  # Prefer using __orig_bases__, which preserve generic type arguments.
+  bases = getattr(tx, '__orig_bases__', tx.__bases__)
+
+  def set_body(ns):
+    ns.update(tx.__dict__)
+    return ns
+
+  copy_tx = types.new_class(tx.__name__, bases, exec_body=set_body)
+  copy_tx.__init__ = _new__init__
+  copy_tx.__getattribute__ = _new__getattribute__
+  # Copy operator overloads from the wrapped type so e.g. `wrapper + 1` works.
+  for op in OVERLOADABLE_OPERATORS:
+    if hasattr(type_x, op):
+      setattr(copy_tx, op, getattr(type_x, op))
+
+  copy_tx.mark_used = _new_mark_used
+  copy_tx.__setattr__ = _new__setattr__
+  _WRAPPERS[type_x] = copy_tx
+
+  return copy_tx(x, tf_should_use_helper)
+
+
+def _add_should_use_warning(x, error_in_function=False, warn_in_eager=False):
+  """Wraps object x so that if it is never used, a warning is logged.
+
+  Args:
+    x: Python object.
+    error_in_function: Python bool. If `True`, a `RuntimeError` is raised
+      if the returned value is never used when created during `tf.function`
+      tracing.
+    warn_in_eager: Python bool. If `True` raise warning if in Eager mode as well
+      as graph mode.
+
+  Returns:
+    An instance of `TFShouldUseWarningWrapper` which subclasses `type(x)`
+    and is a very shallow wrapper for `x` which logs access into `x`.
+  """
+  if x is None or (isinstance(x, list) and not x):
+    return x
+
+  if context.executing_eagerly() and not warn_in_eager:
+    return x
+
+  if ops.inside_function() and not error_in_function:
+    # We don't currently log warnings in tf.function calls, so just skip it.
+    return x
+
+  # Extract the current frame for later use by traceback printing.
+  # Raising/catching is a portable way to get at the caller's frame object.
+  try:
+    raise ValueError()
+  except ValueError:
+    stack_frame = sys.exc_info()[2].tb_frame.f_back
+
+  tf_should_use_helper = _TFShouldUseHelper(
+      type_=type(x),
+      repr_=repr(x),
+      stack_frame=stack_frame,
+      error_in_function=error_in_function,
+      warn_in_eager=warn_in_eager)
+
+  return _get_wrapper(x, tf_should_use_helper)
+
+
+def should_use_result(fn=None, warn_in_eager=False, error_in_function=False):
+  """Function wrapper that ensures the function's output is used.
+
+  If the output is not used, a `logging.error` is logged. If
+  `error_in_function` is set, then a `RuntimeError` will be raised at the
+  end of function tracing if the output is not used by that point.
+
+  An output is marked as used if any of its attributes are read, modified, or
+  updated. Examples when the output is a `Tensor` include:
+
+  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
+  - Accessing a property (e.g. getting `t.name` or `t.op`).
+  - Calling `t.mark_used()`.
+
+  Note, certain behaviors cannot be tracked - for these the object may not
+  be marked as used. Examples include:
+
+  - `t != 0`. In this case, comparison is done on types / ids.
+  - `isinstance(t, tf.Tensor)`. Similar to above.
+
+  Args:
+    fn: The function to wrap.
+    warn_in_eager: Whether to create warnings in Eager as well.
+    error_in_function: Whether to raise an error when creating a tf.function.
+
+  Returns:
+    The wrapped function.
+  """
+  def decorated(fn):
+    """Decorates the input function."""
+    def wrapped(*args, **kwargs):
+      return _add_should_use_warning(fn(*args, **kwargs),
+                                     warn_in_eager=warn_in_eager,
+                                     error_in_function=error_in_function)
+    # Re-dedent the original docstring body so the appended note lines up.
+    fn_doc = fn.__doc__ or ''
+    split_doc = fn_doc.split('\n', 1)
+    if len(split_doc) == 1:
+      updated_doc = fn_doc
+    else:
+      brief, rest = split_doc
+      updated_doc = '\n'.join([brief, textwrap.dedent(rest)])
+
+    note = ('\n\nNote: The output of this function should be used. If it is '
+            'not, a warning will be logged or an error may be raised. '
+            'To mark the output as used, call its .mark_used() method.')
+    return tf_decorator.make_decorator(
+        target=fn,
+        decorator_func=wrapped,
+        decorator_name='should_use_result',
+        decorator_doc=updated_doc + note)
+
+  # Support use both as `@should_use_result` and `@should_use_result(...)`.
+  if fn is not None:
+    return decorated(fn)
+  else:
+    return decorated
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_stack.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_stack.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae68e39e265ebf086c4b036c11e9c78d59a72e5c
--- /dev/null
+++ b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/tf_stack.py
@@ -0,0 +1,187 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Functions used to extract and analyze stacks. Faster than Python libs."""
+# pylint: disable=g-bad-name
+import collections
+import inspect
+import threading
+
+from tensorflow.core.framework import graph_debug_info_pb2
+from tensorflow.python.util import _tf_stack
+
+# Generally such lookups should be done using `threading.local()`. See
+# https://blogs.gnome.org/jamesh/2008/06/11/tls-python/ for a detailed
+# explanation of why. However the transform stacks are expected to be empty
+# when a thread is joined, so reusing the key does not introduce a correctness
+# issue. Moreover, get_ident is faster than storing and retrieving a unique
+# key in a thread local store.
+_get_thread_key = threading.get_ident
+
+
+# TODO(mdan): Move these to C++ as well.
+# Moving to C++ can further avoid extra copies made by get_effective_map.
+# Each per-thread stack starts with a no-op sentinel so stack[-1] is always a
+# valid transform even when no user transform is active.
+_source_mapper_stacks = collections.defaultdict(lambda: [SentinelMapper()])
+_source_filter_stacks = collections.defaultdict(lambda: [SentinelFilter()])
+
+
+class StackTraceTransform(object):
+  """Base class for stack trace transformation functions."""
+
+  _stack_dict = None  # Subclasses should override
+  _thread_key = None
+
+  def __enter__(self):
+    # Any given instance is assumed to be used by a single thread, which reduces
+    # expensive thread local lookups.
+    if self._thread_key is None:
+      self._thread_key = _get_thread_key()
+    else:
+      assert self._thread_key == _get_thread_key(), 'Shared across threads?'
+
+    stack = self._stack_dict[self._thread_key]
+    # Remember the previous top so nested transforms can chain to it.
+    self.parent = stack[-1]
+    stack.append(self)
+    self.update()
+    return self
+
+  def __exit__(self, unused_type, unused_value, unused_traceback):
+    top = self._stack_dict[self._thread_key].pop()
+    assert top is self, 'Concurrent access?'
+
+  def update(self):
+    raise NotImplementedError('subclasses need to override this')
+
+
+class StackTraceMapper(StackTraceTransform):
+  """Allows remapping traceback information to different source code."""
+  _stack_dict = _source_mapper_stacks
+
+  def __init__(self):
+    self.internal_map = _tf_stack.PyBindSourceMap()
+
+  def update(self):
+    # Push the Python-side map down into the C++ extension.
+    self.internal_map.update_to(tuple(self.get_effective_source_map().items()))
+
+  def get_effective_source_map(self):
+    """Returns a map (filename, lineno) -> (filename, lineno, function_name)."""
+    raise NotImplementedError('subclasses need to override this')
+
+
+EMPTY_DICT = {}
+
+
+class SentinelMapper(StackTraceMapper):
+  # No-op mapper used as the bottom of each per-thread stack.
+
+  def get_effective_source_map(self):
+    return EMPTY_DICT
+
+
+class StackTraceFilter(StackTraceTransform):
+  """Allows filtering traceback information by removing superfluous frames."""
+  _stack_dict = _source_filter_stacks
+
+  def __init__(self):
+    self.internal_set = _tf_stack.PyBindFileSet()
+
+  def update(self):
+    self.internal_set.update_to(set(self.get_filtered_filenames()))
+
+  def get_filtered_filenames(self):
+    raise NotImplementedError('subclasses need to override this')
+
+
+EMPTY_SET = frozenset()
+
+
+class SentinelFilter(StackTraceFilter):
+  # No-op filter used as the bottom of each per-thread stack.
+
+  def get_filtered_filenames(self):
+    return EMPTY_SET
+
+
+class CurrentModuleFilter(StackTraceFilter):
+  """Filters stack frames from the module where this is used (best effort)."""
+
+  def __init__(self):
+    super().__init__()
+    filter_filename = None
+    outer_f = None
+    f = inspect.currentframe()
+    try:
+      if f is not None:
+        # The current frame is __init__. The first outer frame should be the
+        # caller.
+        outer_f = f.f_back
+        if outer_f is not None:
+          filter_filename = inspect.getsourcefile(outer_f)
+      self._filename = filter_filename
+      # This may be called repeatedly: once on entry by the superclass, then by
+      # each child context manager.
+      self._cached_set = None
+    finally:
+      # Avoid reference cycles, see:
+      # https://docs.python.org/3.7/library/inspect.html#the-interpreter-stack
+      del f
+      del outer_f
+
+  def get_filtered_filenames(self):
+    if self._cached_set is not None:
+      return self._cached_set
+
+    filtered_filenames = frozenset((self._filename,))
+    # Merge with any enclosing filter so nesting accumulates filenames.
+    if self.parent is not None:
+      filtered_filenames |= self.parent.get_filtered_filenames()
+    self._cached_set = filtered_filenames
+    return filtered_filenames
+
+
+def extract_stack(stacklevel=1):
+  """An eager-friendly alternative to traceback.extract_stack.
+
+  Args:
+    stacklevel: number of initial frames to skip when producing the stack.
+
+  Returns:
+    A list-like FrameSummary containing StackFrame-like objects, which are
+    namedtuple-like objects with the following fields: filename, lineno, name,
+    line, meant to masquerade as traceback.FrameSummary objects.
+  """
+  thread_key = _get_thread_key()
+  # The C++ extractor applies the current thread's top-most mapper and filter.
+  return _tf_stack.extract_stack(
+      _source_mapper_stacks[thread_key][-1].internal_map,
+      _source_filter_stacks[thread_key][-1].internal_set,
+      stacklevel,
+  )
+
+
+def LoadTracesFromDebugInfo(debug_info):
+  # The C++ side consumes the serialized proto bytes, not the proto object.
+  return _tf_stack.LoadTracesFromDebugInfo(debug_info.SerializeToString())
+
+
+class GraphDebugInfoBuilder(_tf_stack.GraphDebugInfoBuilder):
+  # Thin shim converting between proto objects (Python side) and serialized
+  # bytes (C++ side).
+
+  def AppendGraphDebugInfo(self, fn_name, fn_debug_info):
+    debug_info_str = fn_debug_info.SerializeToString()
+    super().AppendGraphDebugInfo(fn_name, debug_info_str)
+
+  def Build(self):
+    debug_info_str = super().Build()
+    debug_info = graph_debug_info_pb2.GraphDebugInfo()
+    debug_info.ParseFromString(debug_info_str)
+    return debug_info
+
+
+StackSummary = _tf_stack.StackTrace
+FrameSummary = _tf_stack.StackFrame
diff --git a/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/traceback_utils.py b/SwarmUI/dlbackend/ComfyUI/venv/lib/python3.10/site-packages/tensorflow/python/util/traceback_utils.py
new file mode 100644
index
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to TensorFlow exception stack trace prettifying."""

import os
import sys
import threading
import traceback
import types
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export


# Thread-local so traceback filtering can be toggled per thread.
_ENABLE_TRACEBACK_FILTERING = threading.local()
# Root of the tensorflow/python package (two levels up from this file);
# frames whose filename contains this path are treated as TF-internal.
_EXCLUDED_PATHS = (
    os.path.abspath(os.path.join(__file__, '..', '..')),
)


@tf_export('debugging.is_traceback_filtering_enabled')
def is_traceback_filtering_enabled():
  """Check whether traceback filtering is currently enabled.

  See also `tf.debugging.enable_traceback_filtering()` and
  `tf.debugging.disable_traceback_filtering()`. Note that filtering out
  internal frames from the tracebacks of exceptions raised by TensorFlow code
  is the default behavior.

  Returns:
    True if traceback filtering is enabled
    (e.g. if `tf.debugging.enable_traceback_filtering()` was called),
    and False otherwise (e.g. if `tf.debugging.disable_traceback_filtering()`
    was called).
  """
  # Defaults to True: filtering is on unless explicitly disabled on
  # this thread.
  value = getattr(_ENABLE_TRACEBACK_FILTERING, 'value', True)
  return value


@tf_export('debugging.enable_traceback_filtering')
def enable_traceback_filtering():
  """Enable filtering out TensorFlow-internal frames in exception stack traces.

  Raw TensorFlow stack traces involve many internal frames, which can be
  challenging to read through, while not being actionable for end users.
  By default, TensorFlow filters internal frames in most exceptions that it
  raises, to keep stack traces short, readable, and focused on what's
  actionable for end users (their own code).

  If you have previously disabled traceback filtering via
  `tf.debugging.disable_traceback_filtering()`, you can re-enable it via
  `tf.debugging.enable_traceback_filtering()`.

  Raises:
    RuntimeError: If Python version is not at least 3.7.
  """
  # Filtering relies on types.TracebackType being constructible from
  # Python, which requires 3.7+.
  if sys.version_info.major != 3 or sys.version_info.minor < 7:
    raise RuntimeError(
        f'Traceback filtering is only available with Python 3.7 or higher. '
        f'This Python version: {sys.version}')
  # NOTE(review): `global` is redundant here — we assign an attribute on the
  # thread-local, not rebind the module name. Harmless, left as-is.
  global _ENABLE_TRACEBACK_FILTERING
  _ENABLE_TRACEBACK_FILTERING.value = True


@tf_export('debugging.disable_traceback_filtering')
def disable_traceback_filtering():
  """Disable filtering out TensorFlow-internal frames in exception stack traces.

  Raw TensorFlow stack traces involve many internal frames, which can be
  challenging to read through, while not being actionable for end users.
  By default, TensorFlow filters internal frames in most exceptions that it
  raises, to keep stack traces short, readable, and focused on what's
  actionable for end users (their own code).

  Calling `tf.debugging.disable_traceback_filtering` disables this filtering
  mechanism, meaning that TensorFlow exceptions stack traces will include
  all frames, in particular TensorFlow-internal ones.

  **If you are debugging a TensorFlow-internal issue, you need to call
  `tf.debugging.disable_traceback_filtering`**.
  To re-enable traceback filtering afterwards, you can call
  `tf.debugging.enable_traceback_filtering()`.
  """
  # NOTE(review): `global` is redundant here (attribute assignment, not a
  # rebind). Harmless, left as-is.
  global _ENABLE_TRACEBACK_FILTERING
  _ENABLE_TRACEBACK_FILTERING.value = False


def include_frame(fname):
  """Returns True if the frame filename `fname` should be kept in a trace.

  A frame is dropped when its filename contains any excluded path.
  Note this is a substring match, not a prefix match.
  """
  for exclusion in _EXCLUDED_PATHS:
    if exclusion in fname:
      return False
  return True


def _process_traceback_frames(tb):
  """Rebuilds traceback `tb` keeping only non-TF-internal frames."""
  new_tb = None
  tb_list = list(traceback.walk_tb(tb))
  # Walk innermost-to-outermost so each newly constructed TracebackType
  # links to the previously built (inner) node via its tb_next argument.
  for f, line_no in reversed(tb_list):
    if include_frame(f.f_code.co_filename):
      new_tb = types.TracebackType(new_tb, f, f.f_lasti, line_no)
  if new_tb is None and tb_list:
    # Every frame was filtered out; keep the innermost frame so the
    # exception still carries a usable traceback.
    f, line_no = tb_list[-1]
    new_tb = types.TracebackType(new_tb, f, f.f_lasti, line_no)
  return new_tb


def filter_traceback(fn):
  """Decorator to filter out TF-internal stack trace frames in exceptions.

  Raw TensorFlow stack traces involve many internal frames, which can be
  challenging to read through, while not being actionable for end users.
  By default, TensorFlow filters internal frames in most exceptions that it
  raises, to keep stack traces short, readable, and focused on what's
  actionable for end users (their own code).

  Arguments:
    fn: The function or method to decorate. Any exception raised within the
      function will be reraised with its internal stack trace frames filtered
      out.

  Returns:
    Decorated function or method.
  """
  # Constructing types.TracebackType requires Python 3.7+; on older
  # versions return the function undecorated.
  if sys.version_info.major != 3 or sys.version_info.minor < 7:
    return fn

  def error_handler(*args, **kwargs):
    try:
      if not is_traceback_filtering_enabled():
        return fn(*args, **kwargs)
    except NameError:
      # In some very rare cases,
      # `is_traceback_filtering_enabled` (from the outer scope) may not be
      # accessible from inside this function
      return fn(*args, **kwargs)

    filtered_tb = None
    try:
      return fn(*args, **kwargs)
    except Exception as e:
      filtered_tb = _process_traceback_frames(e.__traceback__)
      # `from None` suppresses the implicit exception-chaining context so
      # the filtered traceback is what the user sees.
      raise e.with_traceback(filtered_tb) from None
    finally:
      # Drop the local reference to the traceback to avoid a frame
      # reference cycle.
      del filtered_tb

  return tf_decorator.make_decorator(fn, error_handler)
# ==============================================================================
"""Utilities for accessing Python generic type annotations (typing.*)."""

import collections.abc
import typing


def is_generic_union(tp):
  """Returns true if `tp` is a parameterized typing.Union value."""
  origin = getattr(tp, '__origin__', None)
  return origin is typing.Union and tp is not typing.Union


def is_generic_tuple(tp):
  """Returns true if `tp` is a parameterized typing.Tuple value."""
  # The bare forms (`tuple`, `typing.Tuple`) are deliberately excluded:
  # only subscripted values such as Tuple[int, str] count as generic.
  origin = getattr(tp, '__origin__', None)
  return origin in (tuple, typing.Tuple) and tp not in (tuple, typing.Tuple)


def is_generic_list(tp):
  """Returns true if `tp` is a parameterized typing.List value."""
  origin = getattr(tp, '__origin__', None)
  return origin in (list, typing.List) and tp not in (list, typing.List)


def is_generic_mapping(tp):
  """Returns true if `tp` is a parameterized typing.Mapping value."""
  bare_forms = (collections.abc.Mapping, typing.Mapping)
  origin = getattr(tp, '__origin__', None)
  return origin in bare_forms and tp not in bare_forms


def is_forward_ref(tp):
  """Returns true if `tp` is a typing forward reference."""
  # typing.ForwardRef became public in 3.7; older versions expose it as
  # the private typing._ForwardRef.
  forward_ref_cls = getattr(typing, 'ForwardRef', None)
  if forward_ref_cls is None:
    forward_ref_cls = getattr(typing, '_ForwardRef', None)  # pylint: disable=protected-access
  if forward_ref_cls is None:
    return False
  return isinstance(tp, forward_ref_cls)


# Note: typing.get_args was added in Python 3.8.
def _get_args_fallback(tp):
  # Pre-3.8 fallback: read the parameterized type's arguments directly.
  return tp.__args__


# typing.get_args was added in Python 3.8; fall back to __args__ before that.
get_generic_type_args = getattr(typing, 'get_args', _get_args_fallback)
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to manipulate resource variables."""

from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest


def convert_variables_to_tensors(values):
  """Converts `ResourceVariable`s in `values` to `Tensor`s.

  If an object is a `CompositeTensor` and overrides its
  `_convert_variables_to_tensors` method, its `ResourceVariable` components
  will also be converted to `Tensor`s. Objects other than `ResourceVariable`s
  in `values` will be returned unchanged.

  Args:
    values: A nested structure of `ResourceVariable`s, or any other objects.

  Returns:
    A new structure with `ResourceVariable`s in `values` converted to
    `Tensor`s.
  """

  def _to_tensor(value):
    # Plain resource variables read out to dense tensors.
    if _pywrap_utils.IsResourceVariable(value):
      return ops.convert_to_tensor(value)
    # Composite tensors may hold variables as components; delegate to the
    # composite-tensor conversion helper.
    if isinstance(value, composite_tensor.CompositeTensor):
      return composite_tensor.convert_variables_to_tensors(value)
    return value

  return nest.map_structure(_to_tensor, values)


def replace_variables_with_atoms(values):
  """Replaces `ResourceVariable`s in `values` with tf.nest atoms.

  This function is mostly for backward compatibility. Historically,
  `ResourceVariable`s are treated as tf.nest atoms. This is no
  longer the case after `ResourceVariable` becoming `CompositeTensor`.
  Unfortunately, tf.nest doesn't allow customization of what objects
  are treated as atoms. Calling this function to manually convert
  `ResourceVariable`s to atoms to avoid breaking tf.assert_same_structure
  with inputs of a `ResourceVariable` and an atom, like a `Tensor`.

  The specific implementation uses 0 as the tf.nest atom, but other tf.nest
  atoms could also serve the purpose. Note, the `TypeSpec` of None is not a
  tf.nest atom.

  Objects other than `ResourceVariable`s in `values` will be returned
  unchanged.

  Note: this function does not look into `CompositeTensor`s. Replacing
  `ResourceVariable`s in a `CompositeTensor` with atoms will change the
  `TypeSpec` of the `CompositeTensor`, which violates the semantics of
  `CompositeTensor` and tf.nest. So `ResourceVariable`s in
  `CompositeTensor`s will be returned as they are.

  Args:
    values: A nested structure of `ResourceVariable`s, or any other objects.

  Returns:
    A new structure with `ResourceVariable`s in `values` converted to atoms.
  """

  def _to_atom(value):
    # tf.nest treats 0 (or tf.constant(0)) as an atom.
    return 0 if _pywrap_utils.IsResourceVariable(value) else value

  return nest.map_structure(_to_atom, values)