diff --git a/.gitattributes b/.gitattributes index 2e63ff8ca94d552adc6ef365f5467f44e021c93a..9d763df862814a7d3a9b949a86f59287b4de15ba 100644 --- a/.gitattributes +++ b/.gitattributes @@ -808,3 +808,5 @@ parrot/lib/python3.10/site-packages/torch/__pycache__/_meta_registrations.cpytho mplug_owl2/lib/python3.10/site-packages/nvidia/curand/lib/libcurand.so.10 filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_py_exception_registry.so filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/h/h-100bw b/llava_next/share/terminfo/h/h-100bw new file mode 100644 index 0000000000000000000000000000000000000000..19003800f547d0f1cafa1839c290ca7e8f8e49fd Binary files /dev/null and b/llava_next/share/terminfo/h/h-100bw differ diff --git a/llava_next/share/terminfo/h/h100bw b/llava_next/share/terminfo/h/h100bw new file mode 100644 index 0000000000000000000000000000000000000000..19003800f547d0f1cafa1839c290ca7e8f8e49fd Binary files /dev/null and b/llava_next/share/terminfo/h/h100bw differ diff --git a/llava_next/share/terminfo/h/h19-a b/llava_next/share/terminfo/h/h19-a new file mode 100644 index 0000000000000000000000000000000000000000..34216ac51f3f2e330f3d1b8a871744c74acb7d16 Binary files /dev/null and b/llava_next/share/terminfo/h/h19-a differ diff --git a/llava_next/share/terminfo/h/h19-bs b/llava_next/share/terminfo/h/h19-bs new file mode 100644 index 0000000000000000000000000000000000000000..cd50d9280d4f2475fa88226589b198834a7ae18a Binary files /dev/null and b/llava_next/share/terminfo/h/h19-bs differ diff --git a/llava_next/share/terminfo/h/h19-g 
b/llava_next/share/terminfo/h/h19-g new file mode 100644 index 0000000000000000000000000000000000000000..d5a19094157a043cb4c56bb7eb676423a1daf66d Binary files /dev/null and b/llava_next/share/terminfo/h/h19-g differ diff --git a/llava_next/share/terminfo/h/h19kermit b/llava_next/share/terminfo/h/h19kermit new file mode 100644 index 0000000000000000000000000000000000000000..41f40b096a38c4ccb3208f5add938e5f6fdcb9e6 Binary files /dev/null and b/llava_next/share/terminfo/h/h19kermit differ diff --git a/llava_next/share/terminfo/h/h19us b/llava_next/share/terminfo/h/h19us new file mode 100644 index 0000000000000000000000000000000000000000..cbcbac1de175eedcf88f9a355853bbadca1c430c Binary files /dev/null and b/llava_next/share/terminfo/h/h19us differ diff --git a/llava_next/share/terminfo/h/h29a-kc-bc b/llava_next/share/terminfo/h/h29a-kc-bc new file mode 100644 index 0000000000000000000000000000000000000000..4204502b9a97d695d4066af2af18ea20ef89c483 Binary files /dev/null and b/llava_next/share/terminfo/h/h29a-kc-bc differ diff --git a/llava_next/share/terminfo/h/h29a-nkc-bc b/llava_next/share/terminfo/h/h29a-nkc-bc new file mode 100644 index 0000000000000000000000000000000000000000..67108b029a6fe7494934f7aa84361e480831be56 Binary files /dev/null and b/llava_next/share/terminfo/h/h29a-nkc-bc differ diff --git a/llava_next/share/terminfo/h/ha8675 b/llava_next/share/terminfo/h/ha8675 new file mode 100644 index 0000000000000000000000000000000000000000..b52144d48aa3ac418175cadb781e00f1a0d4ce10 Binary files /dev/null and b/llava_next/share/terminfo/h/ha8675 differ diff --git a/llava_next/share/terminfo/h/hds200 b/llava_next/share/terminfo/h/hds200 new file mode 100644 index 0000000000000000000000000000000000000000..145559d702e377a7a8781fd48d8fc04ea89ce2d9 Binary files /dev/null and b/llava_next/share/terminfo/h/hds200 differ diff --git a/llava_next/share/terminfo/h/heath-ansi b/llava_next/share/terminfo/h/heath-ansi new file mode 100644 index 
0000000000000000000000000000000000000000..34216ac51f3f2e330f3d1b8a871744c74acb7d16 Binary files /dev/null and b/llava_next/share/terminfo/h/heath-ansi differ diff --git a/llava_next/share/terminfo/h/heathkit b/llava_next/share/terminfo/h/heathkit new file mode 100644 index 0000000000000000000000000000000000000000..63b3b318f6117985ed510d9f546d01e3bd5b92f6 Binary files /dev/null and b/llava_next/share/terminfo/h/heathkit differ diff --git a/llava_next/share/terminfo/h/heathkit-a b/llava_next/share/terminfo/h/heathkit-a new file mode 100644 index 0000000000000000000000000000000000000000..34216ac51f3f2e330f3d1b8a871744c74acb7d16 Binary files /dev/null and b/llava_next/share/terminfo/h/heathkit-a differ diff --git a/llava_next/share/terminfo/h/hft-c b/llava_next/share/terminfo/h/hft-c new file mode 100644 index 0000000000000000000000000000000000000000..5778800e03009af479d141c023b453108701f145 Binary files /dev/null and b/llava_next/share/terminfo/h/hft-c differ diff --git a/llava_next/share/terminfo/h/hirez100-w b/llava_next/share/terminfo/h/hirez100-w new file mode 100644 index 0000000000000000000000000000000000000000..a91af72d39b56103b3d05355544f85dccab9f024 Binary files /dev/null and b/llava_next/share/terminfo/h/hirez100-w differ diff --git a/llava_next/share/terminfo/h/hmod1 b/llava_next/share/terminfo/h/hmod1 new file mode 100644 index 0000000000000000000000000000000000000000..b8f0d0f5294ad1e182499c0729e6b7371fff6a5e Binary files /dev/null and b/llava_next/share/terminfo/h/hmod1 differ diff --git a/llava_next/share/terminfo/h/hp b/llava_next/share/terminfo/h/hp new file mode 100644 index 0000000000000000000000000000000000000000..b3f1b13bb6638eff5aa3c2d86ea5494e9fa98637 Binary files /dev/null and b/llava_next/share/terminfo/h/hp differ diff --git a/llava_next/share/terminfo/h/hp+arrows b/llava_next/share/terminfo/h/hp+arrows new file mode 100644 index 0000000000000000000000000000000000000000..d0d3fbea88f390a148b968091746b5c18ac76141 Binary files /dev/null and 
b/llava_next/share/terminfo/h/hp+arrows differ diff --git a/llava_next/share/terminfo/h/hp+printer b/llava_next/share/terminfo/h/hp+printer new file mode 100644 index 0000000000000000000000000000000000000000..29ef6e6fe27662ad38269003f59d11700d41de28 Binary files /dev/null and b/llava_next/share/terminfo/h/hp+printer differ diff --git a/llava_next/share/terminfo/h/hp236 b/llava_next/share/terminfo/h/hp236 new file mode 100644 index 0000000000000000000000000000000000000000..713202d6fa315829d8e7f64249308a1b45e46c78 Binary files /dev/null and b/llava_next/share/terminfo/h/hp236 differ diff --git a/llava_next/share/terminfo/h/hp2621-a b/llava_next/share/terminfo/h/hp2621-a new file mode 100644 index 0000000000000000000000000000000000000000..6baa75ccef8bbddd1d6a1fbeb305353e87517e61 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621-a differ diff --git a/llava_next/share/terminfo/h/hp2621-fl b/llava_next/share/terminfo/h/hp2621-fl new file mode 100644 index 0000000000000000000000000000000000000000..744dc4a3c829b4cd79de08859019d9363022f79a Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621-fl differ diff --git a/llava_next/share/terminfo/h/hp2621-k45 b/llava_next/share/terminfo/h/hp2621-k45 new file mode 100644 index 0000000000000000000000000000000000000000..0449f5de2b30d19eafec6436d1b978621f2d3820 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621-k45 differ diff --git a/llava_next/share/terminfo/h/hp2621A b/llava_next/share/terminfo/h/hp2621A new file mode 100644 index 0000000000000000000000000000000000000000..e01ee357fa29b1c99c377c4c60daddbba6065bb6 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621A differ diff --git a/llava_next/share/terminfo/h/hp2621a-a b/llava_next/share/terminfo/h/hp2621a-a new file mode 100644 index 0000000000000000000000000000000000000000..6baa75ccef8bbddd1d6a1fbeb305353e87517e61 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621a-a differ diff --git 
a/llava_next/share/terminfo/h/hp2621b-kx b/llava_next/share/terminfo/h/hp2621b-kx new file mode 100644 index 0000000000000000000000000000000000000000..21b76e597b5e6f834a16b85b68b9a4e094590a25 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2621b-kx differ diff --git a/llava_next/share/terminfo/h/hp2623 b/llava_next/share/terminfo/h/hp2623 new file mode 100644 index 0000000000000000000000000000000000000000..78809dfe4eaee5d60c686337a55db12752101ddc Binary files /dev/null and b/llava_next/share/terminfo/h/hp2623 differ diff --git a/llava_next/share/terminfo/h/hp2623a b/llava_next/share/terminfo/h/hp2623a new file mode 100644 index 0000000000000000000000000000000000000000..78809dfe4eaee5d60c686337a55db12752101ddc Binary files /dev/null and b/llava_next/share/terminfo/h/hp2623a differ diff --git a/llava_next/share/terminfo/h/hp2624 b/llava_next/share/terminfo/h/hp2624 new file mode 100644 index 0000000000000000000000000000000000000000..b5a829ab86f0b528d7bab60e486d9287757eb8aa Binary files /dev/null and b/llava_next/share/terminfo/h/hp2624 differ diff --git a/llava_next/share/terminfo/h/hp2624-10p b/llava_next/share/terminfo/h/hp2624-10p new file mode 100644 index 0000000000000000000000000000000000000000..194ab137f331374fa0e1f11c870f7958d40cc20e Binary files /dev/null and b/llava_next/share/terminfo/h/hp2624-10p differ diff --git a/llava_next/share/terminfo/h/hp2624b-10p-p b/llava_next/share/terminfo/h/hp2624b-10p-p new file mode 100644 index 0000000000000000000000000000000000000000..3a846f106457bb702a9e7ad1e0bd8a86e7f5df45 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2624b-10p-p differ diff --git a/llava_next/share/terminfo/h/hp2626-12 b/llava_next/share/terminfo/h/hp2626-12 new file mode 100644 index 0000000000000000000000000000000000000000..292c3ce22078c57290cb0984a3d19249c350fb25 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2626-12 differ diff --git a/llava_next/share/terminfo/h/hp2626-12-s 
b/llava_next/share/terminfo/h/hp2626-12-s new file mode 100644 index 0000000000000000000000000000000000000000..e326b9e0a122042a2b5bd3145ff595f9080409ad Binary files /dev/null and b/llava_next/share/terminfo/h/hp2626-12-s differ diff --git a/llava_next/share/terminfo/h/hp2627a-rev b/llava_next/share/terminfo/h/hp2627a-rev new file mode 100644 index 0000000000000000000000000000000000000000..65bdb1d2af0c862f8e9352ee1330ce0c6705bac6 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2627a-rev differ diff --git a/llava_next/share/terminfo/h/hp2641a b/llava_next/share/terminfo/h/hp2641a new file mode 100644 index 0000000000000000000000000000000000000000..8600355a3ad339ce09c2b000af631cb7053184b8 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2641a differ diff --git a/llava_next/share/terminfo/h/hp2645 b/llava_next/share/terminfo/h/hp2645 new file mode 100644 index 0000000000000000000000000000000000000000..9cff16b981cb80afc710ca1eee53166579e0093d Binary files /dev/null and b/llava_next/share/terminfo/h/hp2645 differ diff --git a/llava_next/share/terminfo/h/hp2645a b/llava_next/share/terminfo/h/hp2645a new file mode 100644 index 0000000000000000000000000000000000000000..8600355a3ad339ce09c2b000af631cb7053184b8 Binary files /dev/null and b/llava_next/share/terminfo/h/hp2645a differ diff --git a/llava_next/share/terminfo/h/hp700 b/llava_next/share/terminfo/h/hp700 new file mode 100644 index 0000000000000000000000000000000000000000..15a4121bddd779513dd530f4d8d519eda1d629b3 Binary files /dev/null and b/llava_next/share/terminfo/h/hp700 differ diff --git a/llava_next/share/terminfo/h/hp70092 b/llava_next/share/terminfo/h/hp70092 new file mode 100644 index 0000000000000000000000000000000000000000..45f91b2d7ff732e760267add44036fd5b3ea6304 Binary files /dev/null and b/llava_next/share/terminfo/h/hp70092 differ diff --git a/llava_next/share/terminfo/h/hp70092A b/llava_next/share/terminfo/h/hp70092A new file mode 100644 index 
0000000000000000000000000000000000000000..45f91b2d7ff732e760267add44036fd5b3ea6304 Binary files /dev/null and b/llava_next/share/terminfo/h/hp70092A differ diff --git a/llava_next/share/terminfo/h/hp70092a b/llava_next/share/terminfo/h/hp70092a new file mode 100644 index 0000000000000000000000000000000000000000..45f91b2d7ff732e760267add44036fd5b3ea6304 Binary files /dev/null and b/llava_next/share/terminfo/h/hp70092a differ diff --git a/llava_next/share/terminfo/h/hp98721 b/llava_next/share/terminfo/h/hp98721 new file mode 100644 index 0000000000000000000000000000000000000000..7e57ca7080fe277c78af1a673f85c392ff0a9a4b Binary files /dev/null and b/llava_next/share/terminfo/h/hp98721 differ diff --git a/llava_next/share/terminfo/h/hpansi b/llava_next/share/terminfo/h/hpansi new file mode 100644 index 0000000000000000000000000000000000000000..15a4121bddd779513dd530f4d8d519eda1d629b3 Binary files /dev/null and b/llava_next/share/terminfo/h/hpansi differ diff --git a/llava_next/share/terminfo/h/hpex b/llava_next/share/terminfo/h/hpex new file mode 100644 index 0000000000000000000000000000000000000000..321c1aee7fecc945d6917d26242fe0c9c7778341 Binary files /dev/null and b/llava_next/share/terminfo/h/hpex differ diff --git a/llava_next/share/terminfo/h/hpex2 b/llava_next/share/terminfo/h/hpex2 new file mode 100644 index 0000000000000000000000000000000000000000..6f8c911b04a3bf74a875b63e073d9f6887543ed0 Binary files /dev/null and b/llava_next/share/terminfo/h/hpex2 differ diff --git a/llava_next/share/terminfo/h/hpgeneric b/llava_next/share/terminfo/h/hpgeneric new file mode 100644 index 0000000000000000000000000000000000000000..b3f1b13bb6638eff5aa3c2d86ea5494e9fa98637 Binary files /dev/null and b/llava_next/share/terminfo/h/hpgeneric differ diff --git a/llava_next/share/terminfo/h/hpterm b/llava_next/share/terminfo/h/hpterm new file mode 100644 index 0000000000000000000000000000000000000000..99dd05ad4aa8d132bbf463f405a3df26e72a10a0 Binary files /dev/null and 
b/llava_next/share/terminfo/h/hpterm differ diff --git a/llava_next/share/terminfo/h/hz1520-noesc b/llava_next/share/terminfo/h/hz1520-noesc new file mode 100644 index 0000000000000000000000000000000000000000..2e30d6841e829aadf3b3c9351e55a17b38c96359 Binary files /dev/null and b/llava_next/share/terminfo/h/hz1520-noesc differ diff --git a/llava_next/share/terminfo/h/hz1552-rv b/llava_next/share/terminfo/h/hz1552-rv new file mode 100644 index 0000000000000000000000000000000000000000..be0b17406f677551443bf65c1e7123980eacb20f Binary files /dev/null and b/llava_next/share/terminfo/h/hz1552-rv differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so new file mode 100644 index 0000000000000000000000000000000000000000..3a30755ca6bcc8ed6dadc776b663bd76b5c5aa8d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7e60658780349444dab4f4df617a5c1df2fc6cc4e1ec742e812202bbff3526f +size 256968 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so new file mode 100644 index 0000000000000000000000000000000000000000..1ffbdacf557a7cd445da9a909408998833087d41 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7b74fe3a31eb82b808ca0e6505c3e8429a3b34219dc433f39dd788333cb28cd +size 257664 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1402d60148afeb9f28c35c15f12c974d31be0322 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_checkpoint_reader.pyi @@ -0,0 +1,25 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class CheckpointReader: + def __init__(self, arg0: str) -> None: ... + @classmethod + def CheckpointReader_GetTensor(cls, arg0: CheckpointReader, arg1: str) -> object: ... + def _GetVariableToDataTypeMap(self, *args, **kwargs) -> Any: ... + def _HasTensor(self, arg0: str) -> bool: ... + def debug_string(self) -> bytes: ... + def get_variable_to_shape_map(self, *args, **kwargs) -> Any: ... 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bb54143b2887435e7c941b72962787748728749e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_kernel_registry.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TryFindKernelClass(arg0: str) -> bytes: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8ea01514027cfa4a50933e819051af34b85c0af2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_nest.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def FlattenDictItems(arg0: object) -> object: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ba10303b2f04406b888af65e4b76e59b6525cf05 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_stat_summarizer.pyi @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import overload + +class StatSummarizer: + @overload + def __init__(self, arg0: str) -> None: ... + @overload + def __init__(self) -> None: ... + def GetOutputString(self) -> str: ... + def PrintStepStats(self) -> None: ... + def ProcessStepStats(self, arg0) -> None: ... 
+ def ProcessStepStatsStr(self, arg0: str) -> None: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a307be13a4c60efd2477c4e90c1c57f491a9fc04 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_tfprof.pyi @@ -0,0 +1,23 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def AddStep(arg0: int, arg1: str, arg2: str, arg3: str) -> float: ... +def DeleteProfiler() -> None: ... +def NewProfiler(arg0: str, arg1: str) -> bool: ... +def PrintModelAnalysis(arg0: str, arg1: str, arg2: str, arg3: str, arg4: str) -> bytes: ... +def Profile(arg0: str, arg1: str) -> bytes: ... +def ProfilerFromFile(arg0: str) -> None: ... +def SerializeToString() -> bytes: ... +def WriteProfile(arg0: str) -> None: ... 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0de83df042d914119b7ad1167cdc3e0e508c2958 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/_pywrap_transform_graph.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TransformGraphWithStringInputs(arg0: object, arg1: object, arg2: object, arg3: object) -> bytes: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/compat.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..7a4659e0f62251858a3754de05a9b31bd79f6e0a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/compat.py @@ -0,0 +1,211 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Compatibility functions. + +The `tf.compat` module contains two sets of compatibility functions. + +## Tensorflow 1.x and 2.x APIs + +The `compat.v1` and `compat.v2` submodules provide a complete copy of both the +`v1` and `v2` APIs for backwards and forwards compatibility across TensorFlow +versions 1.x and 2.x. See the +[migration guide](https://www.tensorflow.org/guide/migrate) for details. + +## Utilities for writing compatible code + +Aside from the `compat.v1` and `compat.v2` submodules, `tf.compat` also contains +a set of helper functions for writing code that works in both: + +* TensorFlow 1.x and 2.x +* Python 2 and 3 + + +## Type collections + +The compatibility module also provides the following aliases for common +sets of python types: + +* `bytes_or_text_types` +* `complex_types` +* `integral_types` +* `real_types` + +API docstring: tensorflow.compat +""" + +import codecs +import collections.abc as collections_abc # pylint: disable=unused-import +import numbers as _numbers + +import numpy as _np + +from tensorflow.python.util.tf_export import tf_export + + +def as_bytes(bytes_or_text, encoding='utf-8'): + """Converts `bytearray`, `bytes`, or unicode python input types to `bytes`. + + Uses utf-8 encoding for text by default. + + Args: + bytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for encoding unicode. + + Returns: + A `bytes` object. 
+ + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + # Validate encoding, a LookupError will be raised if invalid. + encoding = codecs.lookup(encoding).name + if isinstance(bytes_or_text, bytearray): + return bytes(bytes_or_text) + elif isinstance(bytes_or_text, str): + return bytes_or_text.encode(encoding) + elif isinstance(bytes_or_text, bytes): + return bytes_or_text + else: + raise TypeError('Expected binary or unicode string, got %r' % + (bytes_or_text,)) + + +def as_text(bytes_or_text, encoding='utf-8'): + """Converts any string-like python input types to unicode. + + Returns the input as a unicode string. Uses utf-8 encoding for text + by default. + + Args: + bytes_or_text: A `bytes`, `str`, or `unicode` object. + encoding: A string indicating the charset for decoding unicode. + + Returns: + A `unicode` (Python 2) or `str` (Python 3) object. + + Raises: + TypeError: If `bytes_or_text` is not a binary or unicode string. + """ + # Validate encoding, a LookupError will be raised if invalid. + encoding = codecs.lookup(encoding).name + if isinstance(bytes_or_text, str): + return bytes_or_text + elif isinstance(bytes_or_text, bytes): + return bytes_or_text.decode(encoding) + else: + raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text) + + +def as_str(bytes_or_text, encoding='utf-8'): + return as_text(bytes_or_text, encoding) + +tf_export('compat.as_text')(as_text) +tf_export('compat.as_bytes')(as_bytes) +tf_export('compat.as_str')(as_str) + + +@tf_export('compat.as_str_any') +def as_str_any(value, encoding='utf-8'): + """Converts input to `str` type. + + Uses `str(value)`, except for `bytes` typed inputs, which are converted + using `as_str`. + + Args: + value: A object that can be converted to `str`. + encoding: Encoding for `bytes` typed inputs. + + Returns: + A `str` object. 
+ """ + if isinstance(value, bytes): + return as_str(value, encoding=encoding) + else: + return str(value) + + +@tf_export('compat.path_to_str') +def path_to_str(path): + r"""Converts input which is a `PathLike` object to `str` type. + + Converts from any python constant representation of a `PathLike` object to + a string. If the input is not a `PathLike` object, simply returns the input. + + Args: + path: An object that can be converted to path representation. + + Returns: + A `str` object. + + Usage: + In case a simplified `str` version of the path is needed from an + `os.PathLike` object. + + Examples: + ```python + $ tf.compat.path_to_str('C:\XYZ\tensorflow\./.././tensorflow') + 'C:\XYZ\tensorflow\./.././tensorflow' # Windows OS + $ tf.compat.path_to_str(Path('C:\XYZ\tensorflow\./.././tensorflow')) + 'C:\XYZ\tensorflow\..\tensorflow' # Windows OS + $ tf.compat.path_to_str(Path('./corpus')) + 'corpus' # Linux OS + $ tf.compat.path_to_str('./.././Corpus') + './.././Corpus' # Linux OS + $ tf.compat.path_to_str(Path('./.././Corpus')) + '../Corpus' # Linux OS + $ tf.compat.path_to_str(Path('./..////../')) + '../..' # Linux OS + + ``` + """ + if hasattr(path, '__fspath__'): + path = as_str_any(path.__fspath__()) + return path + + +def path_to_bytes(path): + r"""Converts input which is a `PathLike` object to `bytes`. + + Converts from any python constant representation of a `PathLike` object + or `str` to bytes. + + Args: + path: An object that can be converted to path representation. + + Returns: + A `bytes` object. + + Usage: + In case a simplified `bytes` version of the path is needed from an + `os.PathLike` object. + """ + if hasattr(path, '__fspath__'): + path = path.__fspath__() + return as_bytes(path) + + +# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we +# need to check them specifically. The same goes from Real and Complex. 
+integral_types = (_numbers.Integral, _np.integer) +tf_export('compat.integral_types').export_constant(__name__, 'integral_types') +real_types = (_numbers.Real, _np.integer, _np.floating) +tf_export('compat.real_types').export_constant(__name__, 'real_types') +complex_types = (_numbers.Complex, _np.number) +tf_export('compat.complex_types').export_constant(__name__, 'complex_types') + +# Either bytes or text. +bytes_or_text_types = (bytes, str) +tf_export('compat.bytes_or_text_types').export_constant(__name__, + 'bytes_or_text_types') diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..1da4e463604b5fddd474e0221af3643dc4bc96aa --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/custom_nest_protocol.py @@ -0,0 +1,120 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Protocol class for custom tf.nest support.""" + +import typing +from typing import Protocol + + +@typing.runtime_checkable +class CustomNestProtocol(Protocol): + """Protocol for adding custom tf.nest support in user-defined classes. 
+ + User classes should implement the two methods defined in this protocol in + order to be supported by nest functions. + - `__tf_flatten__` for generating the flattened components and the metadata + of the current object. + - `__tf_unflatten__` for creating a new object based on the input metadata + and the components. + See the method doc for details. + + In terms of support level, classes implementing this protocol + - are supported by tf.nest and tf.data functions. + - have limited support from tf.function, which requires writing a custom + TraceType subclass to be used as the input or output of a tf.function. + - are NOT supported by SavedModel. + + Code Examples: + + >>> import dataclasses + >>> @dataclasses.dataclass + ... class MaskedTensor: + ... mask: bool + ... value: tf.Tensor + ... + ... def __tf_flatten__(self): + ... metadata = (self.mask,) # static config. + ... components = (self.value,) # dynamic values. + ... return metadata, components + ... + ... @classmethod + ... def __tf_unflatten__(cls, metadata, components): + ... mask = metadata[0] + ... value = components[0] + ... return MaskedTensor(mask=mask, value=value) + ... + >>> mt = MaskedTensor(mask=True, value=tf.constant([1])) + >>> mt + MaskedTensor(mask=True, value=) + >>> tf.nest.is_nested(mt) + True + >>> mt2 = MaskedTensor(mask=False, value=tf.constant([2])) + >>> tf.nest.assert_same_structure(mt, mt2) + + >>> leaves = tf.nest.flatten(mt) + >>> leaves + [] + + >>> mt3 = tf.nest.pack_sequence_as(mt, leaves) + >>> mt3 + MaskedTensor(mask=True, value=) + >>> bool(mt == mt3) + True + + >>> tf.nest.map_structure(lambda x: x * 2, mt) + MaskedTensor(mask=True, value=) + + More examples are available in the unit tests (nest_test.py). + """ + + def __tf_flatten__(self): + """Flatten current object into (metadata, components). 
+ + Returns: + A `tuple` of (metadata, components), where + - metadata is a custom Python object that stands for the static config + of the current object, which is supposed to be fixed and not affected + by data transformation. + - components is a `tuple` that contains the modifiable fields of the + current object. + + Implementation Note: + - This method should not invoke any TensorFlow ops. + - This method only needs to flatten the current level. If current object has + an attribute that also need custom flattening, nest functions (such as + `nest.flatten`) will utilize this method to do recursive flattening. + - Components must ba a `tuple`, not a `list` + """ + + @classmethod + def __tf_unflatten__(cls, metadata, components): + """Create a user-defined object from (metadata, components). + + Args: + metadata: a custom Python objet that stands for the static config for + reconstructing a new object of the current class. + components: a `tuple` that contains the dynamic data fields of the current + class, for object reconstruction. + + Returns: + The user-defined object, with the same class of the current object. + + Implementation Note: + - This method should not invoke any TensorFlow ops. + - This method only needs to unflatten the current level. If the object has + an attribute that also need custom unflattening, nest functions will + utilize this method to do recursive unflattening. + """ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..36a62bddedc3f5f94a5de92bef929a194ae7e430 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/deprecation.py @@ -0,0 +1,763 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tensor utility functions.""" +import collections +import functools +import inspect +import re + +from tensorflow.python.framework import strict_mode +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import decorator_utils +from tensorflow.python.util import is_in_graph_mode +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect +from tensorflow.tools.docs import doc_controls + +# Allow deprecation warnings to be silenced temporarily with a context manager. +_PRINT_DEPRECATION_WARNINGS = True + +# Remember which deprecation warnings have been printed already. +_PRINTED_WARNING = {} + + +class DeprecatedNamesAlreadySetError(Exception): + """Raised when setting deprecated names multiple times for the same symbol.""" + + +def _log_deprecation(msg, *args, **kwargs): + """Raises errors for deprecated methods if in strict mode, warns otherwise.""" + if strict_mode.STRICT_MODE: + logging.error(msg, *args, **kwargs) + raise RuntimeError( + 'This behavior has been deprecated, which raises an error in strict' + ' mode.' 
+ ) + else: + logging.warning(msg, *args, **kwargs) + + +def _add_deprecated_function_notice_to_docstring(doc, date, instructions): + """Adds a deprecation notice to a docstring for deprecated functions.""" + main_text = [ + 'THIS FUNCTION IS DEPRECATED. It will be removed %s.' + % ('in a future version' if date is None else ('after %s' % date)) + ] + if instructions: + main_text.append('Instructions for updating:') + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION', + '(deprecated)', + main_text, + notice_type='Deprecated') + + +def _add_deprecated_arg_notice_to_docstring(doc, date, instructions, + deprecated_names): + """Adds a deprecation notice to a docstring for deprecated arguments.""" + + deprecation_string = ', '.join(sorted(deprecated_names)) + + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION ARGUMENTS', + '(deprecated arguments)', [ + 'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. ' + 'They will be removed %s.' % + (deprecation_string, 'in a future version' if date is None else + ('after %s' % date)), 'Instructions for updating:' + ], + notice_type='Deprecated') + + +def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions, + deprecated_name_value_dict): + """Adds a deprecation notice to a docstring for deprecated arguments.""" + + deprecation_string = ', '.join( + '%s=%r' % (key, value) + for key, value in sorted(deprecated_name_value_dict.items())) + + when = 'in a future version' if date is None else ('after %s' % date) + + return decorator_utils.add_notice_to_docstring( + doc, + instructions, + 'DEPRECATED FUNCTION ARGUMENT VALUES', + '(deprecated argument values)', [ + 'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. ' + 'They will be removed %s.' 
% + (deprecation_string, when), 'Instructions for updating:' + ], + notice_type='Deprecated') + + +def _validate_deprecation_args(date, instructions): + if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date): + raise ValueError(f'Date must be in format YYYY-MM-DD. Received: {date}') + if not instructions: + raise ValueError( + 'Don\'t deprecate things without conversion instructions! Specify ' + 'the `instructions` argument.') + + +def _call_location(outer=False): + """Returns call location given level up from current call.""" + # Two up: <_call_location>, <_call_location's caller> + # tf_inspect is not required here. Please ignore the lint warning by adding + # DISABLE_IMPORT_INSPECT_CHECK=TRUE to your cl description. Using it caused + # test timeouts (b/189384061). + f = inspect.currentframe().f_back.f_back + parent = f and f.f_back + if outer and parent is not None: + f = parent + return '{}:{}'.format(f.f_code.co_filename, f.f_lineno) + + +def _safe_eq(a, b): + if a is None or b is None: + return a is None and b is None + return a == b + + +def _wrap_decorator(wrapped_function, decorator_name): + """Indicate that one function wraps another. + + This decorator wraps a function using `tf_decorator.make_decorator` + so that doc generation scripts can pick up original function + signature. + It would be better to use @functools.wrap decorator, but it would + not update function signature to match wrapped function in Python 2. + + Args: + wrapped_function: The function that decorated function wraps. + decorator_name: The name of the decorator. + + Returns: + Function that accepts wrapper function as an argument and returns + `TFDecorator` instance. + """ + + def wrapper(wrapper_func): + return tf_decorator.make_decorator(wrapped_function, wrapper_func, + decorator_name) + + return wrapper + + +def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True): + """Deprecate a symbol in favor of a new name with identical semantics. 
+ + This function is meant to be used when defining a backwards-compatibility + alias for a symbol which has been moved. For example: + + module1.py: + ```python + class NewNameForClass: pass + ``` + + module2.py: + ```python + import module1 + + DeprecatedNameForClass = deprecated_alias( + deprecated_name='module2.DeprecatedNameForClass', + name='module1.NewNameForClass', + func_or_class=module1.NewNameForClass) + ``` + + This function works for classes and functions. + + For classes, it creates a new class which is functionally identical (it + inherits from the original, and overrides its constructor), but which prints + a deprecation warning when an instance is created. It also adds a deprecation + notice to the class' docstring. + + For functions, it returns a function wrapped by `tf_decorator.make_decorator`. + That function prints a warning when used, and has a deprecation notice in its + docstring. This is more or less equivalent (the deprecation warning has + slightly different text) to writing: + + ```python + @deprecated + def deprecated_alias(original_args): + real_function(original_args) + ``` + + Args: + deprecated_name: The name of the symbol that is being deprecated, to be used + in the warning message. This should be its fully qualified name to avoid + confusion. + name: The name of the symbol that is to be used instead of the deprecated + name. This should be a fully qualified name to avoid confusion. + func_or_class: The (non-deprecated) class or function for which a deprecated + alias should be created. + warn_once: If True (the default), only print a deprecation warning the first + time this function is used, or the class is instantiated. + + Returns: + A wrapped version of `func_or_class` which prints a deprecation warning on + use and has a modified docstring. + """ + if tf_inspect.isclass(func_or_class): + + # Make a new class with __init__ wrapped in a warning. 
+ class _NewClass(func_or_class): # pylint: disable=missing-docstring + __doc__ = decorator_utils.add_notice_to_docstring( + func_or_class.__doc__, + 'Please use %s instead.' % name, + 'DEPRECATED CLASS', + '(deprecated)', [('THIS CLASS IS DEPRECATED. ' + 'It will be removed in a future version. ')], + notice_type='Deprecated') + __name__ = func_or_class.__name__ + __module__ = _call_location(outer=True) + + @_wrap_decorator(func_or_class.__init__, 'deprecated_alias') + def __init__(self, *args, **kwargs): + if hasattr(_NewClass.__init__, '__func__'): + # Python 2 + _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__ + else: + # Python 3 + _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__ + + if _PRINT_DEPRECATION_WARNINGS: + # We're making the alias as we speak. The original may have other + # aliases, so we cannot use it to check for whether it's already been + # warned about. + if _NewClass.__init__ not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[_NewClass.__init__] = True + _log_deprecation( + 'From %s: The name %s is deprecated. Please use %s instead.\n', + _call_location(), deprecated_name, name) + super(_NewClass, self).__init__(*args, **kwargs) + + return _NewClass + else: + decorator_utils.validate_callable(func_or_class, 'deprecated') + + # Make a wrapper for the original + @functools.wraps(func_or_class) + def new_func(*args, **kwargs): # pylint: disable=missing-docstring + if _PRINT_DEPRECATION_WARNINGS: + # We're making the alias as we speak. The original may have other + # aliases, so we cannot use it to check for whether it's already been + # warned about. + if new_func not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[new_func] = True + _log_deprecation( + 'From %s: The name %s is deprecated. 
Please use %s instead.\n', + _call_location(), deprecated_name, name) + return func_or_class(*args, **kwargs) + + return tf_decorator.make_decorator( + func_or_class, new_func, 'deprecated', + _add_deprecated_function_notice_to_docstring( + func_or_class.__doc__, None, 'Please use %s instead.' % name)) + + +def deprecated_endpoints(*args): + """Decorator for marking endpoints deprecated. + + This decorator does not print deprecation messages. + TODO(annarev): eventually start printing deprecation warnings when + @deprecation_endpoints decorator is added. + + Args: + *args: Deprecated endpoint names. + + Returns: + A function that takes symbol as an argument and adds + _tf_deprecated_api_names to that symbol. + _tf_deprecated_api_names would be set to a list of deprecated + endpoint names for the symbol. + """ + + def deprecated_wrapper(func): + # pylint: disable=protected-access + if '_tf_deprecated_api_names' in func.__dict__: + raise DeprecatedNamesAlreadySetError( + f'Cannot set deprecated names for {func.__name__} to {args}. ' + 'Deprecated names are already set to ' + f'{func._tf_deprecated_api_names}.') + func._tf_deprecated_api_names = args + # pylint: disable=protected-access + return func + + return deprecated_wrapper + + +def deprecated(date, instructions, warn_once=True): + """Decorator for marking functions or methods deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called. It has the following format: + + (from ) is deprecated and will be removed after . + Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. + will include the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated)' is appended + to the first line of the docstring and a deprecation notice is prepended + to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None. 
+ instructions: String. Instructions on how to update code using the + deprecated function. + warn_once: Boolean. Set to `True` to warn only the first time the decorated + function is called. Otherwise, every call will log a warning. + + Returns: + Decorated function or method. + + Raises: + ValueError: If date is not None or in ISO 8601 format, or instructions are + empty. + """ + _validate_deprecation_args(date, instructions) + + def deprecated_wrapper(func_or_class): + """Deprecation wrapper.""" + if isinstance(func_or_class, type): + # If a class is deprecated, you actually want to wrap the constructor. + cls = func_or_class + if cls.__new__ is object.__new__: + # If a class defaults to its parent's constructor, wrap that instead. + func = cls.__init__ + constructor_name = '__init__' + decorators, _ = tf_decorator.unwrap(func) + for decorator in decorators: + if decorator.decorator_name == 'deprecated': + # If the parent is already deprecated, there's nothing to do. + return cls + else: + func = cls.__new__ + constructor_name = '__new__' + + else: + cls = None + constructor_name = None + func = func_or_class + + decorator_utils.validate_callable(func, 'deprecated') + + @_wrap_decorator(func, 'deprecated') + def new_func(*args, **kwargs): # pylint: disable=missing-docstring + if _PRINT_DEPRECATION_WARNINGS: + if func not in _PRINTED_WARNING and cls not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[func] = True + if cls: + _PRINTED_WARNING[cls] = True + _log_deprecation( + 'From %s: %s (from %s) is deprecated and will be removed %s.\n' + 'Instructions for updating:\n%s', _call_location(), + decorator_utils.get_qualified_name(func), + func_or_class.__module__, + 'in a future version' if date is None else ('after %s' % date), + instructions) + return func(*args, **kwargs) + + doc_controls.set_deprecated(new_func) + new_func = tf_decorator.make_decorator( + func, new_func, 'deprecated', + _add_deprecated_function_notice_to_docstring(func.__doc__, date, + 
instructions)) + new_func.__signature__ = inspect.signature(func) + + if cls is None: + return new_func + else: + # Insert the wrapped function as the constructor + setattr(cls, constructor_name, new_func) + + # And update the docstring of the class. + cls.__doc__ = _add_deprecated_function_notice_to_docstring( + cls.__doc__, date, instructions) + + return cls + + return deprecated_wrapper + + +DeprecatedArgSpec = collections.namedtuple( + 'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value']) + + +def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples, + **kwargs): + """Decorator for marking specific function arguments as deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called with the deprecated argument. It has the following format: + + Calling (from ) with is deprecated and will be + removed after . Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. + includes the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated arguments)' is + appended to the first line of the docstring and a deprecation notice is + prepended to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None. + instructions: String. Instructions on how to update code using the + deprecated function. + *deprecated_arg_names_or_tuples: String or 2-Tuple (String, ok_val). The + string is the deprecated argument name. Optionally, an ok-value may be + provided. If the user provided argument equals this value, the warning is + suppressed. + **kwargs: If `warn_once=False` is passed, every call with a deprecated + argument will log a warning. The default behavior is to only warn the + first time the function is called with any given deprecated argument. All + other kwargs raise `ValueError`. + + Returns: + Decorated function or method. 
+ + Raises: + ValueError: If date is not None or in ISO 8601 format, instructions are + empty, the deprecated arguments are not present in the function + signature, the second element of a deprecated_tuple is not a + list, or if a kwarg other than `warn_once` is passed. + """ + _validate_deprecation_args(date, instructions) + if not deprecated_arg_names_or_tuples: + raise ValueError('Specify which argument is deprecated.') + if kwargs and list(kwargs.keys()) != ['warn_once']: + kwargs.pop('warn_once', None) + raise ValueError(f'Illegal argument passed to deprecated_args: {kwargs}') + warn_once = kwargs.get('warn_once', True) + + def _get_arg_names_to_ok_vals(): + """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position.""" + d = {} + for name_or_tuple in deprecated_arg_names_or_tuples: + if isinstance(name_or_tuple, tuple): + d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1]) + else: + d[name_or_tuple] = DeprecatedArgSpec(-1, False, None) + return d + + def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec): + """Builds a dictionary from deprecated arguments to their spec. + + Returned dict is keyed by argument name. + Each value is a DeprecatedArgSpec with the following fields: + position: The zero-based argument position of the argument + within the signature. None if the argument isn't found in + the signature. + ok_values: Values of this argument for which warning will be + suppressed. + + Args: + names_to_ok_vals: dict from string arg_name to a list of values, possibly + empty, which should not elicit a warning. + arg_spec: Output from tf_inspect.getfullargspec on the called function. + + Returns: + Dictionary from arg_name to DeprecatedArgSpec. 
+ """ + # Extract argument list + arg_space = arg_spec.args + arg_spec.kwonlyargs + arg_name_to_pos = {name: pos for pos, name in enumerate(arg_space)} + deprecated_positional_args = {} + for arg_name, spec in iter(names_to_ok_vals.items()): + if arg_name in arg_name_to_pos: + pos = arg_name_to_pos[arg_name] + deprecated_positional_args[arg_name] = DeprecatedArgSpec( + pos, spec.has_ok_value, spec.ok_value) + return deprecated_positional_args + + deprecated_arg_names = _get_arg_names_to_ok_vals() + + def deprecated_wrapper(func): + """Deprecation decorator.""" + decorator_utils.validate_callable(func, 'deprecated_args') + + arg_spec = tf_inspect.getfullargspec(func) + deprecated_positions = _get_deprecated_positional_arguments( + deprecated_arg_names, arg_spec) + + is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names + is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names + + if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated + != len(deprecated_arg_names_or_tuples)): + known_args = ( + arg_spec.args + arg_spec.kwonlyargs + + [arg_spec.varargs, arg_spec.varkw]) + missing_args = [ + arg_name for arg_name in deprecated_arg_names + if arg_name not in known_args + ] + raise ValueError('The following deprecated arguments are not present ' + f'in the function signature: {missing_args}. ' + 'Expected arguments from the following list: ' + f'{known_args}.') + + def _same_value(a, b): + """A comparison operation that works for multiple object types. + + Returns True for two empty lists, two numeric values with the + same value, etc. + + Returns False for (pd.DataFrame, None), and other pairs which + should not be considered equivalent. + + Args: + a: value one of the comparison. + b: value two of the comparison. + + Returns: + A boolean indicating whether the two inputs are the same value + for the purposes of deprecation. 
+ """ + if a is b: + return True + try: + equality = a == b + if isinstance(equality, bool): + return equality + except TypeError: + return False + return False + + @functools.wraps(func) + def new_func(*args, **kwargs): + """Deprecation wrapper.""" + # TODO(apassos) figure out a way to have reasonable performance with + # deprecation warnings and eager mode. + if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS: + invalid_args = [] + named_args = tf_inspect.getcallargs(func, *args, **kwargs) + for arg_name, spec in iter(deprecated_positions.items()): + if (spec.position < len(args) and + not (spec.has_ok_value and + _same_value(named_args[arg_name], spec.ok_value))): + invalid_args.append(arg_name) + if is_varargs_deprecated and len(args) > len(arg_spec.args): + invalid_args.append(arg_spec.varargs) + if is_kwargs_deprecated and kwargs: + invalid_args.append(arg_spec.varkw) + for arg_name in deprecated_arg_names: + if (arg_name in kwargs and + not (deprecated_positions[arg_name].has_ok_value and + _same_value(named_args[arg_name], + deprecated_positions[arg_name].ok_value))): + invalid_args.append(arg_name) + for arg_name in invalid_args: + if (func, arg_name) not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[(func, arg_name)] = True + _log_deprecation( + 'From %s: calling %s (from %s) with %s is deprecated and will ' + 'be removed %s.\nInstructions for updating:\n%s', + _call_location(), decorator_utils.get_qualified_name(func), + func.__module__, arg_name, + 'in a future version' if date is None else ('after %s' % date), + instructions) + return func(*args, **kwargs) + + doc = _add_deprecated_arg_notice_to_docstring( + func.__doc__, date, instructions, sorted(deprecated_arg_names.keys())) + return tf_decorator.make_decorator(func, new_func, 'deprecated', doc) + + return deprecated_wrapper + + +def deprecated_arg_values(date, + instructions, + warn_once=True, + **deprecated_kwargs): + """Decorator for marking specific function 
argument values as deprecated. + + This decorator logs a deprecation warning whenever the decorated function is + called with the deprecated argument values. It has the following format: + + Calling (from ) with = is deprecated and + will be removed after . Instructions for updating: + + + If `date` is None, 'after ' is replaced with 'in a future version'. + will include the class name if it is a method. + + It also edits the docstring of the function: ' (deprecated arguments)' is + appended to the first line of the docstring and a deprecation notice is + prepended to the rest of the docstring. + + Args: + date: String or None. The date the function is scheduled to be removed. Must + be ISO 8601 (YYYY-MM-DD), or None + instructions: String. Instructions on how to update code using the + deprecated function. + warn_once: If `True`, warn only the first time this function is called with + deprecated argument values. Otherwise, every call (with a deprecated + argument value) will log a warning. + **deprecated_kwargs: The deprecated argument values. + + Returns: + Decorated function or method. + + Raises: + ValueError: If date is not None or in ISO 8601 format, or instructions are + empty. 
+ """ + _validate_deprecation_args(date, instructions) + if not deprecated_kwargs: + raise ValueError('Specify which argument values are deprecated.') + + def deprecated_wrapper(func): + """Deprecation decorator.""" + decorator_utils.validate_callable(func, 'deprecated_arg_values') + + @functools.wraps(func) + def new_func(*args, **kwargs): + """Deprecation wrapper.""" + if _PRINT_DEPRECATION_WARNINGS: + named_args = tf_inspect.getcallargs(func, *args, **kwargs) + for arg_name, arg_value in deprecated_kwargs.items(): + if arg_name in named_args and _safe_eq(named_args[arg_name], + arg_value): + if (func, arg_name) not in _PRINTED_WARNING: + if warn_once: + _PRINTED_WARNING[(func, arg_name)] = True + _log_deprecation( + 'From %s: calling %s (from %s) with %s=%s is deprecated and ' + 'will be removed %s.\nInstructions for updating:\n%s', + _call_location(), decorator_utils.get_qualified_name(func), + func.__module__, arg_name, arg_value, + 'in a future version' if date is None else + ('after %s' % date), instructions) + return func(*args, **kwargs) + + doc = _add_deprecated_arg_value_notice_to_docstring(func.__doc__, date, + instructions, + deprecated_kwargs) + return tf_decorator.make_decorator(func, new_func, 'deprecated', doc) + + return deprecated_wrapper + + +def deprecated_argument_lookup(new_name, new_value, old_name, old_value): + """Looks up deprecated argument name and ensures both are not used. + + Args: + new_name: new name of argument + new_value: value of new argument (or None if not used) + old_name: old name of argument + old_value: value of old argument (or None if not used) + + Returns: + The effective argument that should be used. 
+ Raises: + ValueError: if new_value and old_value are both non-null + """ + if old_value is not None: + if new_value is not None: + raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.") + return old_value + return new_value + + +def rewrite_argument_docstring(old_doc, old_argument, new_argument): + return old_doc.replace('`%s`' % old_argument, + '`%s`' % new_argument).replace('%s:' % old_argument, + '%s:' % new_argument) + + +@tf_contextlib.contextmanager +def silence(): + """Temporarily silence deprecation warnings.""" + global _PRINT_DEPRECATION_WARNINGS + print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS + _PRINT_DEPRECATION_WARNINGS = False + yield + _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings + + +def deprecate_moved_module(deprecated_name, new_module, deletion_version): + """Logs a warning when a module that has been moved to a new location is used. + + Copy the following code into the old module: + + ``` + import deprecation + import new_module + + __getattr__ = deprecation.deprecate_moved_module( + __name__, new_module, "2.9") # adjust version number. + ``` + + Args: + deprecated_name: Name of old module. + new_module: Module to replace the old module. + deletion_version: Version of TensorFlow in which the old module will be + removed. + + Returns: + A function that logs a warning and returns the symbol from the new module. + Set this function as the module's `__getattr__`. + """ + + def getter(name): + if getter not in _PRINTED_WARNING and _PRINT_DEPRECATION_WARNINGS: + _PRINTED_WARNING[getter] = True + _log_deprecation( + 'Please fix your imports. Module %s has been moved to %s. The old ' + 'module will be deleted in version %s.', deprecated_name, + new_module.__name__, deletion_version) + return getattr(new_module, name) + + return getter + + +class HiddenTfApiAttribute(property): + """Hides a class attribute from the public API. 
+ + Attributes in public classes can be hidden from the API by having an '_' in + front of the name (e.g. ClassName._variables). This doesn't work when + attributes or methods are inherited from a parent class. To hide inherited + attributes, set their values to be `deprecation.hide_attribute_from_api`. + """ + + def __init__(self, deprecation_message): + + def raise_error(unused_self): + raise AttributeError(deprecation_message) + + super(HiddenTfApiAttribute, self).__init__(raise_error) + + +hide_attribute_from_api = HiddenTfApiAttribute # pylint: disable=invalid-name + +# TODO(kathywu): Remove once cl/246395236 is submitted. +HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.') diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..2605c2a17c7695896b0e44168cefc2c7ddbbe574 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py @@ -0,0 +1,1302 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Type-based dispatch for TensorFlow's Python APIs. 
+ +"Python APIs" refers to Python functions that have been exported with +`tf_export`, such as `tf.add` and `tf.linalg.matmul`; they are sometimes also +referred to as "ops". + +There are currently two dispatch systems for TensorFlow: + + * The "fallback dispatch" system calls an API's standard implementation first, + and only tries to perform dispatch if that standard implementation raises a + TypeError (or ValueError) exception. + + * The "type-based dispatch" system checks the types of the parameters passed + to an API, and performs dispatch if those types match any signatures that + have been registered for dispatch. + +The fallback dispatch system was the original dispatch system, but it was +somewhat brittle and had limitations, such as an inability to support dispatch +for some operations (like convert_to_tensor). We plan to remove the fallback +dispatch system in favor of the type-based dispatch system, once all users have +been switched over to use it. + +### Fallback Dispatch + +The fallback dispatch system is based on "operation dispatchers", which can be +used to override the behavior for TensorFlow ops when they are called with +otherwise unsupported argument types. In particular, when an operation is +called with arguments that would cause it to raise a TypeError, it falls back on +its registered operation dispatchers. If any registered dispatchers can handle +the arguments, then its result is returned. Otherwise, the original TypeError is +raised. + +### Type-based Dispatch + +The main interface for the type-based dispatch system is the `dispatch_for_api` +decorator, which overrides the default implementation for a TensorFlow API. +The decorated function (known as the "dispatch target") will override the +default implementation for the API when the API is called with parameters that +match a specified type signature. + +### Dispatch Support + +By default, dispatch support is added to the generated op wrappers for any +visible ops by default. 
APIs/ops that are implemented in Python can opt in to
dispatch support using the `add_dispatch_support` decorator.
"""

import collections
import itertools
import typing  # pylint: disable=unused-import (used in doctests)

from tensorflow.python.framework import _pywrap_python_api_dispatcher as _api_dispatcher
from tensorflow.python.framework import ops
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export as tf_export_lib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import traceback_utils
from tensorflow.python.util import type_annotations
from tensorflow.python.util.tf_export import tf_export


# Private function attributes used to store dispatchers on TensorFlow APIs.
FALLBACK_DISPATCH_ATTR = "_tf_fallback_dispatchers"
TYPE_BASED_DISPATCH_ATTR = "_tf_type_based_dispatcher"

# OpDispatchers which should be used for all operations.
_GLOBAL_DISPATCHERS = []


################################################################################
# Fallback Dispatch
################################################################################


@tf_export("__internal__.dispatch.OpDispatcher", v1=[])
class OpDispatcher(object):
  """Abstract base class for TensorFlow operator dispatchers.

  Each operation dispatcher acts as an override handler for a single
  TensorFlow operation, and its results are used when the handler indicates
  that it can handle the operation's arguments (by returning any value other
  than `OpDispatcher.NOT_SUPPORTED`).
  """

  # Sentinel value that can be returned to indicate that an operation
  # dispatcher does not support a given set of arguments.
  NOT_SUPPORTED = object()

  def handle(self, args, kwargs):  # pylint: disable=unused-argument
    """Handle this dispatcher's operation with the specified arguments.

    If this operation dispatcher can handle the given arguments, then
    return an appropriate value (or raise an appropriate exception).

    Args:
      args: The arguments to the operation.
      kwargs: The keyword arguments to the operation.

    Returns:
      The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this
      dispatcher can not handle the given arguments.
    """
    # Base class never handles anything; subclasses override this.
    return self.NOT_SUPPORTED

  def register(self, op):
    """Register this dispatcher as a handler for `op`.

    Args:
      op: Python function: the TensorFlow operation that should be handled. Must
        have a dispatch list (which is added automatically for generated ops,
        and can be added to Python ops using the `add_dispatch_support`
        decorator).
    """
    if not hasattr(op, FALLBACK_DISPATCH_ATTR):
      raise AssertionError("Dispatching not enabled for %s" % op)
    getattr(op, FALLBACK_DISPATCH_ATTR).append(self)


@tf_export("__internal__.dispatch.GlobalOpDispatcher", v1=[])
class GlobalOpDispatcher(object):
  """Abstract base class for TensorFlow global operator dispatchers."""

  NOT_SUPPORTED = OpDispatcher.NOT_SUPPORTED

  def handle(self, op, args, kwargs):
    """Handle the specified operation with the specified arguments."""

  def register(self):
    """Register this dispatcher as a handler for all ops."""
    _GLOBAL_DISPATCHERS.append(self)


def dispatch(op, args, kwargs):
  """Returns the result from the first successful dispatcher for a given op.

  Calls the `handle` method of each `OpDispatcher` that has been registered
  to handle `op`, and returns the value from the first successful handler.

  Args:
    op: Python function: the operation to dispatch for.
    args: The arguments to the operation.
    kwargs: The keyword arguments to the operation.

  Returns:
    The result of the operation, or `NOT_SUPPORTED` if no registered
    dispatcher can handle the given arguments.
  """
  # Per-op dispatchers take priority over global dispatchers.
  for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR):
    result = dispatcher.handle(args, kwargs)
    if result is not OpDispatcher.NOT_SUPPORTED:
      return result
  for dispatcher in _GLOBAL_DISPATCHERS:
    result = dispatcher.handle(op, args, kwargs)
    if result is not OpDispatcher.NOT_SUPPORTED:
      return result
  return OpDispatcher.NOT_SUPPORTED


class _TypeBasedDispatcher(OpDispatcher):
  """Dispatcher that handles op if any arguments have a specified type.

  Checks the types of the arguments and keyword arguments (including elements
  of lists or tuples), and if any argument values have the indicated type(s),
  then delegates to an override function.
  """

  def __init__(self, override_func, types):
    self._types = types
    self._override_func = override_func

  def _handles(self, args, kwargs):
    # Matches either a direct instance of the registered type(s), or a
    # list/tuple containing at least one such instance.
    for arg in itertools.chain(args, kwargs.values()):
      if (isinstance(arg, self._types) or
          (isinstance(arg, (list, tuple)) and
           any(isinstance(elt, self._types) for elt in arg))):
        return True
    return False

  def handle(self, args, kwargs):
    if self._handles(args, kwargs):
      return self._override_func(*args, **kwargs)
    else:
      return self.NOT_SUPPORTED


def _remove_annotation(sig):
  """Removes annotation from a python Signature."""
  parameters = [p.replace(annotation=p.empty) for p in sig.parameters.values()]
  return sig.replace(parameters=parameters, return_annotation=sig.empty)


def _get_required_param_names(sig):
  """Returns a list of required parameter names from a python Signature."""
  params = []
  for p in sig.parameters.values():
    if p.kind == p.VAR_POSITIONAL:
      continue
    if p.kind == p.VAR_KEYWORD:
      continue
    if p.default is not p.empty:
      continue
    params.append(p.name)
  return params


def get_compatible_func(op, func):
  """Returns a compatible function.

  Args:
    op: a callable with whose signature the returned function is compatible.
    func: a callable which is called by the returned function.

  Returns:
    a compatible function, which conducts the actions of `func` but can
    be called like `op`, given that:
    - the list of required arguments in `func` and `op` are the same.
    - there is no override of the default arguments of `op` that are not
      supported by `func`.
  """
  op_signature = _remove_annotation(tf_inspect.signature(op))
  func_signature = _remove_annotation(tf_inspect.signature(func))

  # Identical signatures, no need to apply compatibility fixes.
  if op_signature == func_signature:
    return func

  # When calling func:
  # - Positional args without default must be in the same order.
  # - Ignore missing optional arguments from op

  op_pos_names = _get_required_param_names(op_signature)
  func_pos_names = _get_required_param_names(func_signature)

  if op_pos_names != func_pos_names:
    raise AssertionError(
        "The decorated function's non-default arguments must be identical"
        " to that of the overridden op."
        f" func has {func_pos_names}. op has {op_pos_names}."
    )

  # Parameters that `op` accepts but `func` does not; these may only ever be
  # passed with their default values (checked at call time below).
  func_missing_params = {}

  for name in set(op_signature.parameters.keys()) - set(
      func_signature.parameters.keys()
  ):
    p = op_signature.parameters[name]
    if p.default is p.empty:
      raise AssertionError(
          "The decorated function's signature must implement all of the"
          f" non-default arguments of the overridden op. Argument `{name}` is"
          " unimplemented."
      )
    func_missing_params[name] = p

  def compatible_func(*args, **kwargs):
    # Bind against op's signature, then strip the params func doesn't accept.
    bound = op_signature.bind(*args, **kwargs)
    for name, param in func_missing_params.items():
      if name not in bound.arguments:
        continue
      value = bound.arguments.pop(name)
      if value is not param.default:
        raise AssertionError(
            f"Dispatched op is called with argument `{name}` set to a"
            " non-default value, which is not supported by the decorated"
            " function"
        )
    return func(*bound.args, **bound.kwargs)

  return compatible_func


# pylint: disable=g-doc-return-or-yield
def dispatch_for_types(op, *types):
  """Decorator to declare that a Python function overrides an op for a type.

  The decorated function is used to override `op` if any of the arguments or
  keyword arguments (including elements of lists or tuples) have one of the
  specified types.

  Example:

  ```python
  @dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue)
  def ragged_add(x, y, name=None): ...
  ```

  Args:
    op: Python function: the operation that should be overridden.
    *types: The argument types for which this function should be used.
  """

  def decorator(func):
    _TypeBasedDispatcher(get_compatible_func(op, func), types).register(op)
    return func

  return decorator


# pylint: enable=g-doc-return-or-yield


def add_fallback_dispatch_list(target):
  """Decorator that adds a dispatch_list attribute to an op."""
  if hasattr(target, FALLBACK_DISPATCH_ATTR):
    raise AssertionError("%s already has a dispatch list" % target)
  setattr(target, FALLBACK_DISPATCH_ATTR, [])
  return target


# Alias for backwards-compatibility.
add_dispatch_list = add_fallback_dispatch_list


################################################################################
# Type-based Dispatch
################################################################################


@tf_export("experimental.dispatch_for_api")
def dispatch_for_api(api, *signatures):
  """Decorator that overrides the default implementation for a TensorFlow API.

  The decorated function (known as the "dispatch target") will override the
  default implementation for the API when the API is called with parameters that
  match a specified type signature. Signatures are specified using dictionaries
  that map parameter names to type annotations. E.g., in the following example,
  `masked_add` will be called for `tf.add` if both `x` and `y` are
  `MaskedTensor`s:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor

  >>> @dispatch_for_api(tf.math.add, {'x': MaskedTensor, 'y': MaskedTensor})
  ... def masked_add(x, y, name=None):
  ...   return MaskedTensor(x.values + y.values, x.mask & y.mask)

  >>> mt = tf.add(MaskedTensor([1, 2], [True, False]), MaskedTensor(10, True))
  >>> print(f"values={mt.values.numpy()}, mask={mt.mask.numpy()}")
  values=[11 12], mask=[ True False]

  If multiple type signatures are specified, then the dispatch target will be
  called if any of the signatures match. For example, the following code
  registers `masked_add` to be called if `x` is a `MaskedTensor` *or* `y` is
  a `MaskedTensor`.

  >>> @dispatch_for_api(tf.math.add, {'x': MaskedTensor}, {'y':MaskedTensor})
  ... def masked_add(x, y):
  ...   x_values = x.values if isinstance(x, MaskedTensor) else x
  ...   x_mask = x.mask if isinstance(x, MaskedTensor) else True
  ...   y_values = y.values if isinstance(y, MaskedTensor) else y
  ...   y_mask = y.mask if isinstance(y, MaskedTensor) else True
  ...   return MaskedTensor(x_values + y_values, x_mask & y_mask)

  The type annotations in type signatures may be type objects (e.g.,
  `MaskedTensor`), `typing.List` values, or `typing.Union` values. For
  example, the following will register `masked_concat` to be called if `values`
  is a list of `MaskedTensor` values:

  >>> @dispatch_for_api(tf.concat, {'values': typing.List[MaskedTensor]})
  ... def masked_concat(values, axis):
  ...   return MaskedTensor(tf.concat([v.values for v in values], axis),
  ...                       tf.concat([v.mask for v in values], axis))

  Each type signature must contain at least one subclass of `tf.CompositeTensor`
  (which includes subclasses of `tf.ExtensionType`), and dispatch will only be
  triggered if at least one type-annotated parameter contains a
  `CompositeTensor` value. This rule avoids invoking dispatch in degenerate
  cases, such as the following examples:

  * `@dispatch_for_api(tf.concat, {'values': List[MaskedTensor]})`: Will not
    dispatch to the decorated dispatch target when the user calls
    `tf.concat([])`.

  * `@dispatch_for_api(tf.add, {'x': Union[MaskedTensor, Tensor], 'y':
    Union[MaskedTensor, Tensor]})`: Will not dispatch to the decorated dispatch
    target when the user calls `tf.add(tf.constant(1), tf.constant(2))`.

  The dispatch target's signature must match the signature of the API that is
  being overridden. In particular, parameters must have the same names, and
  must occur in the same order. The dispatch target may optionally elide the
  "name" parameter, in which case it will be wrapped with a call to
  `tf.name_scope` when appropriate.

  Args:
    api: The TensorFlow API to override.
    *signatures: Dictionaries mapping parameter names or indices to type
      annotations, specifying when the dispatch target should be called. In
      particular, the dispatch target will be called if any signature matches;
      and a signature matches if all of the specified parameters have types that
      match with the indicated type annotations. If no signatures are
      specified, then a signature will be read from the dispatch target
      function's type annotations.

  Returns:
    A decorator that overrides the default implementation for `api`.

  #### Registered APIs

  The TensorFlow APIs that may be overridden by `@dispatch_for_api` are:

  <<API_LIST>>
  """
  dispatcher = getattr(api, TYPE_BASED_DISPATCH_ATTR, None)
  if dispatcher is None:
    raise ValueError(f"{api} does not support dispatch.")

  api_signature = tf_inspect.signature(api)
  # Build one C++-side checker per explicit signature dict.
  signature_checkers = [
      _make_signature_checker(api_signature, signature)
      for signature in signatures
  ]

  def decorator(dispatch_target):
    """Decorator that registers the given dispatch target."""
    if not callable(dispatch_target):
      raise TypeError("Expected dispatch_target to be callable; "
                      f"got {dispatch_target!r}")
    dispatch_target = _add_name_scope_wrapper(dispatch_target, api_signature)
    _check_signature(api_signature, dispatch_target)

    for signature_checker in signature_checkers:
      dispatcher.Register(signature_checker, dispatch_target)
    _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].extend(signatures)

    # No explicit signatures: fall back to the target's type annotations.
    if not signature_checkers:
      signature = _signature_from_annotations(dispatch_target)
      checker = _make_signature_checker(api_signature, signature)
      dispatcher.Register(checker, dispatch_target)
      _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].append(signature)

    return dispatch_target

  return decorator


# Nested dict mapping `api_func` -> `dispatch_target` -> `List[signature]`,
# which can be used for documentation generation and for improved error messages
# when APIs are called with unsupported types.
_TYPE_BASED_DISPATCH_SIGNATURES = {}


def apis_with_type_based_dispatch():
  """Returns a list of TensorFlow APIs that support type-based dispatch."""
  full_name = lambda api: f"{api.__module__}.{api.__name__}"
  return sorted(_TYPE_BASED_DISPATCH_SIGNATURES, key=full_name)


def type_based_dispatch_signatures_for(cls):
  """Returns dispatch signatures that have been registered for a given class.

  This function is intended for documentation-generation purposes.

  Args:
    cls: The class to search for. Type signatures are searched recursively, so
      e.g., if `cls=RaggedTensor`, then information will be returned for all
      dispatch targets that have `RaggedTensor` anywhere in their type
      annotations (including nested in `typing.Union` or `typing.List`.)

  Returns:
    A `dict` mapping `api` -> `signatures`, where `api` is a TensorFlow API
    function; and `signatures` is a list of dispatch signatures for `api`
    that include `cls`. (Each signature is a dict mapping argument names to
    type annotations; see `dispatch_for_api` for more info.)
  """

  def contains_cls(x):
    """Returns true if `x` contains `cls`."""
    if isinstance(x, dict):
      return any(contains_cls(v) for v in x.values())
    if x is cls:
      return True
    # Recurse into List[...] / Union[...] annotations.
    if (type_annotations.is_generic_list(x) or
        type_annotations.is_generic_union(x)):
      return any(
          contains_cls(arg)
          for arg in type_annotations.get_generic_type_args(x))
    return False

  result = {}
  for api, api_signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():
    for signatures in api_signatures.values():
      matching = [sig for sig in signatures if contains_cls(sig)]
      if matching:
        result.setdefault(api, []).extend(matching)
  return result


# TODO(edloper): Consider using a mechanism like this to automatically add
# the `name` argument to all TensorFlow APIs that are implemented in Python
# (so each Python function doesn't need to do it manually).
def _add_name_scope_wrapper(func, api_signature):
  """Wraps `func` to expect a "name" arg, and use it to call `ops.name_scope`.

  If `func` already expects a "name" arg, or if `api_signature` does not
  expect a "name" arg, then returns `func` as-is.

  Args:
    func: The function to wrap. Signature must match `api_signature` (except
      the "name" parameter may be missing).
    api_signature: The signature of the original API (used to find the index for
      the "name" parameter).

  Returns:
    The wrapped function (or the original function if no wrapping is needed).
  """
  if "name" not in api_signature.parameters:
    return func  # no wrapping needed (API has no name parameter).

  func_signature = tf_inspect.signature(func)
  func_argspec = tf_inspect.getargspec(func)
  if "name" in func_signature.parameters or func_argspec.keywords is not None:
    return func  # No wrapping needed (already has name parameter).

  name_index = list(api_signature.parameters).index("name")

  def wrapped_func(*args, **kwargs):
    # Pull `name` out of either the positional or keyword args, then forward
    # the remaining args to `func` inside a name scope (if a name was given).
    if name_index < len(args):
      name = args[name_index]
      args = args[:name_index] + args[name_index + 1:]
    else:
      name = kwargs.pop("name", None)
    if name is None:
      return func(*args, **kwargs)
    else:
      with ops.name_scope(name):
        return func(*args, **kwargs)

  wrapped_func = tf_decorator.make_decorator(func, wrapped_func)
  # Advertise a signature that includes the `name` parameter.
  wrapped_func.__signature__ = func_signature.replace(
      parameters=(list(func_signature.parameters.values()) +
                  [api_signature.parameters["name"]]))
  del wrapped_func._tf_decorator
  return wrapped_func


@tf_export("experimental.unregister_dispatch_for")
def unregister_dispatch_for(dispatch_target):
  """Unregisters a function that was registered with `@dispatch_for_*`.

  This is primarily intended for testing purposes.

  Example:

  >>> # Define a type and register a dispatcher to override `tf.abs`:
  >>> class MyTensor(tf.experimental.ExtensionType):
  ...   value: tf.Tensor
  >>> @tf.experimental.dispatch_for_api(tf.abs)
  ... def my_abs(x: MyTensor):
  ...   return MyTensor(tf.abs(x.value))
  >>> tf.abs(MyTensor(5))
  MyTensor(value=)

  >>> # Unregister the dispatcher, so `tf.abs` no longer calls `my_abs`.
  >>> unregister_dispatch_for(my_abs)
  >>> tf.abs(MyTensor(5))
  Traceback (most recent call last):
  ...
  ValueError: Attempt to convert a value ... to a Tensor.

  Args:
    dispatch_target: The function to unregister.

  Raises:
    ValueError: If `dispatch_target` was not registered using `@dispatch_for`,
      `@dispatch_for_unary_elementwise_apis`, or
      `@dispatch_for_binary_elementwise_apis`.
  """
  found = False

  # Check if dispatch_target registered by `@dispatch_for_api`
  for api, signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():
    if dispatch_target in signatures:
      dispatcher = getattr(api, TYPE_BASED_DISPATCH_ATTR)
      dispatcher.Unregister(dispatch_target)
      del signatures[dispatch_target]
      found = True

  # Check if dispatch_target registered by `@dispatch_for_*_elementwise_apis`
  elementwise_keys_to_delete = [
      key for (key, handler) in _ELEMENTWISE_API_HANDLERS.items()
      if handler is dispatch_target
  ]
  for key in set(elementwise_keys_to_delete):
    # Recursively unregister the per-API targets generated for this handler.
    for _, target in _ELEMENTWISE_API_TARGETS[key]:
      unregister_dispatch_for(target)
    del _ELEMENTWISE_API_HANDLERS[key]
    del _ELEMENTWISE_API_TARGETS[key]
    found = True

  if not found:
    raise ValueError(f"Function {dispatch_target} was not registered using "
                     "a `@dispatch_for_*` decorator.")


def register_dispatchable_type(cls):
  """Class decorator that registers a type for use with type-based dispatch.

  Should *not* be used with subclasses of `CompositeTensor` or `ExtensionType`
  (which are automatically registered).

  Note: this function is intended to support internal legacy use cases (such
  as RaggedTensorValue), and will probably not be exposed as a public API.

  Args:
    cls: The class to register.

  Returns:
    `cls`.
  """
  _api_dispatcher.register_dispatchable_type(cls)
  return cls


def add_type_based_api_dispatcher(target):
  """Adds a PythonAPIDispatcher to the given TensorFlow API function."""
  if hasattr(target, TYPE_BASED_DISPATCH_ATTR):
    raise ValueError(f"{target} already has a type-based API dispatcher.")

  _, unwrapped = tf_decorator.unwrap(target)
  target_argspec = tf_inspect.getargspec(unwrapped)
  if target_argspec.varargs or target_argspec.keywords:
    # @TODO(b/194903203) Add v2 dispatch support for APIs that take varargs
    # and keywords. Examples of APIs that take varargs and kwargs: meshgrid,
    # einsum, map_values, map_flat_values.
    return target

  setattr(
      target, TYPE_BASED_DISPATCH_ATTR,
      _api_dispatcher.PythonAPIDispatcher(unwrapped.__name__,
                                          target_argspec.args,
                                          target_argspec.defaults))
  _TYPE_BASED_DISPATCH_SIGNATURES[target] = collections.defaultdict(list)
  return target


def _check_signature(api_signature, func):
  """Checks that a dispatch target's signature is compatible with an API.

  Args:
    api_signature: The signature of the TensorFlow API.
    func: The dispatch target.

  Raises:
    ValueError: if the signatures are incompatible. Two signatures are
      considered compatible if they have the same number of parameters, and all
      corresponding parameters have the same `name` and `kind`. (Parameters
      are not required to have the same default value or the same annotation.)
  """
  # Special case: if func_signature is (*args, **kwargs), then assume it's ok.
  func_argspec = tf_inspect.getargspec(func)
  if (func_argspec.varargs is not None and func_argspec.keywords is not None
      and not func_argspec.args):
    return

  func_signature = tf_inspect.signature(func)
  ok = len(api_signature.parameters) == len(func_signature.parameters)
  if ok:
    for param_1, param_2 in zip(api_signature.parameters.values(),
                                func_signature.parameters.values()):
      if (param_1.name != param_2.name) or (param_1.kind != param_2.kind):
        ok = False
  if not ok:
    raise ValueError(f"Dispatch function's signature {func_signature} does "
                     f"not match API's signature {api_signature}.")


def _make_signature_checker(api_signature, signature):
  """Builds a PySignatureChecker for the given type signature.

  Args:
    api_signature: The `inspect.Signature` of the API whose signature is
      being checked.
    signature: Dictionary mapping parameter names to type annotations.

  Returns:
    A `PySignatureChecker`.
  """
  if not (isinstance(signature, dict) and
          all(isinstance(k, (str, int)) for k in signature)):
    raise TypeError("signatures must be dictionaries mapping parameter names "
                    "to type annotations.")
  checkers = []

  param_names = list(api_signature.parameters)
  for param_name, param_type in signature.items():
    # Convert positional parameters to named parameters.
    if (isinstance(param_name, int) and
        param_name < len(api_signature.parameters)):
      param_name = list(api_signature.parameters.values())[param_name].name

    # Check that the parameter exists, and has an appropriate kind.
    param = api_signature.parameters.get(param_name, None)
    if param is None:
      raise ValueError("signature includes annotation for unknown "
                       f"parameter {param_name!r}.")
    if param.kind not in (tf_inspect.Parameter.POSITIONAL_ONLY,
                          tf_inspect.Parameter.POSITIONAL_OR_KEYWORD):
      raise ValueError("Dispatch currently only supports type annotations "
                       "for positional parameters; can't handle annotation "
                       f"for {param.kind!r} parameter {param_name}.")

    checker = make_type_checker(param_type)
    index = param_names.index(param_name)
    checkers.append((index, checker))

  return _api_dispatcher.PySignatureChecker(checkers)


# Cache for InstanceTypeChecker objects (we only want to create one
# InstanceTypeChecker for each type, since each one uses an internal cache
# to avoid repeated calls back into Python's isinstance).
_is_instance_checker_cache = {}


def make_type_checker(annotation):
  """Builds a PyTypeChecker for the given type annotation."""
  if type_annotations.is_generic_union(annotation):
    type_args = type_annotations.get_generic_type_args(annotation)

    # If the union contains two or more simple types, then use a single
    # InstanceChecker to check them.
    simple_types = [t for t in type_args if isinstance(t, type)]
    simple_types = tuple(sorted(simple_types, key=id))
    if len(simple_types) > 1:
      if simple_types not in _is_instance_checker_cache:
        checker = _api_dispatcher.MakeInstanceChecker(*simple_types)
        _is_instance_checker_cache[simple_types] = checker
      options = ([_is_instance_checker_cache[simple_types]] +
                 [make_type_checker(t) for t in type_args
                  if not isinstance(t, type)])
      return _api_dispatcher.MakeUnionChecker(options)

    options = [make_type_checker(t) for t in type_args]
    return _api_dispatcher.MakeUnionChecker(options)

  elif type_annotations.is_generic_list(annotation):
    type_args = type_annotations.get_generic_type_args(annotation)
    if len(type_args) != 1:
      raise AssertionError("Expected List[...] to have a single type parameter")
    elt_type = make_type_checker(type_args[0])
    return _api_dispatcher.MakeListChecker(elt_type)

  elif isinstance(annotation, type):
    # Cache per type so repeated annotations share one InstanceChecker.
    if annotation not in _is_instance_checker_cache:
      checker = _api_dispatcher.MakeInstanceChecker(annotation)
      _is_instance_checker_cache[annotation] = checker
    return _is_instance_checker_cache[annotation]

  elif annotation is None:
    return make_type_checker(type(None))

  else:
    raise ValueError(f"Type annotation {annotation} is not currently supported"
                     " by dispatch. Supported annotations: type objects, "
                     " List[...], and Union[...]")


def _signature_from_annotations(func):
  """Builds a dict mapping from parameter names to type annotations."""
  func_signature = tf_inspect.signature(func)

  signature = dict([(name, param.annotation)
                    for (name, param) in func_signature.parameters.items()
                    if param.annotation != tf_inspect.Parameter.empty])
  if not signature:
    raise ValueError("The dispatch_for_api decorator must be called with at "
                     "least one signature, or applied to a function that "
                     "has type annotations on its parameters.")
  return signature


# Registries for elementwise APIs and API handlers.
#
# _*_ELEMENTWISE_APIS: A list of TensorFlow APIs that have been registered
# as elementwise operations using the `register_*_elementwise_api`
# decorators.
#
# _ELEMENTWISE_API_HANDLERS: Dicts mapping from argument type(s) to API
# handlers that have been registered with the `dispatch_for_*_elementwise_apis`
# decorators.
#
# _ELEMENTWISE_API_TARGETS: Dict mapping from argument type(s) to lists of
# `(api, dispatch_target)` pairs. Used to implement
# `unregister_elementwise_api_handler`.
_UNARY_ELEMENTWISE_APIS = []
_BINARY_ELEMENTWISE_APIS = []
_BINARY_ELEMENTWISE_ASSERT_APIS = []
_ELEMENTWISE_API_HANDLERS = {}
_ELEMENTWISE_API_TARGETS = {}

_ASSERT_API_TAG = "ASSERT_API_TAG"


@tf_export("experimental.dispatch_for_unary_elementwise_apis")
def dispatch_for_unary_elementwise_apis(x_type):
  """Decorator to override default implementation for unary elementwise APIs.

  The decorated function (known as the "elementwise api handler") overrides
  the default implementation for any unary elementwise API whenever the value
  for the first argument (typically named `x`) matches the type annotation
  `x_type`. The elementwise api handler is called with two arguments:

    `elementwise_api_handler(api_func, x)`

  Where `api_func` is a function that takes a single parameter and performs the
  elementwise operation (e.g., `tf.abs`), and `x` is the first argument to the
  elementwise api.

  The following example shows how this decorator can be used to update all
  unary elementwise operations to handle a `MaskedTensor` type:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_unary_elementwise_apis(MaskedTensor)
  ... def unary_elementwise_api_handler(api_func, x):
  ...   return MaskedTensor(api_func(x.values), x.mask)
  >>> mt = MaskedTensor([1, -2, -3], [True, False, True])
  >>> abs_mt = tf.abs(mt)
  >>> print(f"values={abs_mt.values.numpy()}, mask={abs_mt.mask.numpy()}")
  values=[1 2 3], mask=[ True False True]

  For unary elementwise operations that take extra arguments beyond `x`, those
  arguments are *not* passed to the elementwise api handler, but are
  automatically added when `api_func` is called. E.g., in the following
  example, the `dtype` parameter is not passed to
  `unary_elementwise_api_handler`, but is added by `api_func`.

  >>> ones_mt = tf.ones_like(mt, dtype=tf.float32)
  >>> print(f"values={ones_mt.values.numpy()}, mask={ones_mt.mask.numpy()}")
  values=[1.0 1.0 1.0], mask=[ True False True]

  Args:
    x_type: A type annotation indicating when the api handler should be called.
      See `dispatch_for_api` for a list of supported annotation types.

  Returns:
    A decorator.

  #### Registered APIs

  The unary elementwise APIs are:

  <<API_LIST>>
  """

  def decorator(handler):
    # Only one handler may be registered per type.
    if (x_type,) in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A unary elementwise dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[(x_type,)]}) "
                       f"has already been registered for {x_type}.")
    _ELEMENTWISE_API_HANDLERS[(x_type,)] = handler
    # Apply the handler to every already-registered unary elementwise API.
    for api in _UNARY_ELEMENTWISE_APIS:
      _add_dispatch_for_unary_elementwise_api(api, x_type, handler)

    return handler

  return decorator


@tf_export("experimental.dispatch_for_binary_elementwise_apis")
def dispatch_for_binary_elementwise_apis(x_type, y_type):
  """Decorator to override default implementation for binary elementwise APIs.

  The decorated function (known as the "elementwise api handler") overrides
  the default implementation for any binary elementwise API whenever the value
  for the first two arguments (typically named `x` and `y`) match the specified
  type annotations. The elementwise api handler is called with two arguments:

    `elementwise_api_handler(api_func, x, y)`

  Where `x` and `y` are the first two arguments to the elementwise api, and
  `api_func` is a TensorFlow function that takes two parameters and performs the
  elementwise operation (e.g., `tf.add`).

  The following example shows how this decorator can be used to update all
  binary elementwise operations to handle a `MaskedTensor` type:

  >>> class MaskedTensor(tf.experimental.ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor
  >>> @dispatch_for_binary_elementwise_apis(MaskedTensor, MaskedTensor)
  ... def binary_elementwise_api_handler(api_func, x, y):
  ...   return MaskedTensor(api_func(x.values, y.values), x.mask & y.mask)
  >>> a = MaskedTensor([1, 2, 3, 4, 5], [True, True, True, True, False])
  >>> b = MaskedTensor([2, 4, 6, 8, 0], [True, True, True, False, True])
  >>> c = tf.add(a, b)
  >>> print(f"values={c.values.numpy()}, mask={c.mask.numpy()}")
  values=[ 3 6 9 12 5], mask=[ True True True False False]

  Args:
    x_type: A type annotation indicating when the api handler should be called.
    y_type: A type annotation indicating when the api handler should be called.

  Returns:
    A decorator.

  #### Registered APIs

  The binary elementwise APIs are:

  <<API_LIST>>
  """

  def decorator(handler):
    # Only one handler may be registered per (x_type, y_type) pair.
    if (x_type, y_type) in _ELEMENTWISE_API_HANDLERS:
      raise ValueError("A binary elementwise dispatch handler "
                       f"({_ELEMENTWISE_API_HANDLERS[x_type, y_type]}) "
                       f"has already been registered for ({x_type}, {y_type}).")
    _ELEMENTWISE_API_HANDLERS[x_type, y_type] = handler
    # Apply the handler to every already-registered binary elementwise API.
    for api in _BINARY_ELEMENTWISE_APIS:
      _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)

    return handler

  return decorator


@tf_export("experimental.dispatch_for_binary_elementwise_assert_apis")
def dispatch_for_binary_elementwise_assert_apis(x_type, y_type):
  """Decorator to override default implementation for binary elementwise assert APIs.

  The decorated function (known as the "elementwise assert handler")
  overrides the default implementation for any binary elementwise assert API
  whenever the value for the first two arguments (typically named `x` and `y`)
  match the specified type annotations. The handler is called with two
  arguments:

    `elementwise_assert_handler(assert_func, x, y)`

  Where `x` and `y` are the first two arguments to the binary elementwise assert
  operation, and `assert_func` is a TensorFlow function that takes two
  parameters and performs the elementwise assert operation (e.g.,
  `tf.debugging.assert_equal`).
+ + The following example shows how this decorator can be used to update all + binary elementwise assert operations to handle a `MaskedTensor` type: + + >>> class MaskedTensor(tf.experimental.ExtensionType): + ... values: tf.Tensor + ... mask: tf.Tensor + >>> @dispatch_for_binary_elementwise_assert_apis(MaskedTensor, MaskedTensor) + ... def binary_elementwise_assert_api_handler(assert_func, x, y): + ... merged_mask = tf.logical_and(x.mask, y.mask) + ... selected_x_values = tf.boolean_mask(x.values, merged_mask) + ... selected_y_values = tf.boolean_mask(y.values, merged_mask) + ... assert_func(selected_x_values, selected_y_values) + >>> a = MaskedTensor([1, 1, 0, 1, 1], [False, False, True, True, True]) + >>> b = MaskedTensor([2, 2, 0, 2, 2], [True, True, True, False, False]) + >>> tf.debugging.assert_equal(a, b) # assert passed; no exception was thrown + + >>> a = MaskedTensor([1, 1, 1, 1, 1], [True, True, True, True, True]) + >>> b = MaskedTensor([0, 0, 0, 0, 2], [True, True, True, True, True]) + >>> tf.debugging.assert_greater(a, b) + Traceback (most recent call last): + ... + InvalidArgumentError: Condition x > y did not hold. + + Args: + x_type: A type annotation indicating when the api handler should be called. + y_type: A type annotation indicating when the api handler should be called. + + Returns: + A decorator. 
+ + #### Registered APIs + + The binary elementwise assert APIs are: + + <> + """ + + def decorator(handler): + api_handler_key = (x_type, y_type, _ASSERT_API_TAG) + if api_handler_key in _ELEMENTWISE_API_HANDLERS: + raise ValueError("A binary elementwise assert dispatch handler " + f"({_ELEMENTWISE_API_HANDLERS[api_handler_key]}) " + f"has already been registered for ({x_type}, {y_type}).") + _ELEMENTWISE_API_HANDLERS[api_handler_key] = handler + for api in _BINARY_ELEMENTWISE_ASSERT_APIS: + _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler) + + return handler + + return decorator + + +def register_unary_elementwise_api(func): + """Decorator that registers a TensorFlow op as a unary elementwise API.""" + _UNARY_ELEMENTWISE_APIS.append(func) + for args, handler in _ELEMENTWISE_API_HANDLERS.items(): + if len(args) == 1: + _add_dispatch_for_unary_elementwise_api(func, args[0], handler) + return func + + +def register_binary_elementwise_api(func): + """Decorator that registers a TensorFlow op as a binary elementwise API.""" + _BINARY_ELEMENTWISE_APIS.append(func) + for args, handler in _ELEMENTWISE_API_HANDLERS.items(): + if len(args) == 2: + _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler) + return func + + +def register_binary_elementwise_assert_api(func): + """Decorator that registers a TensorFlow op as a binary elementwise assert API. + + Different from `dispatch_for_binary_elementwise_apis`, this decorator is used + for assert apis, such as assert_equal, assert_none_equal, etc, which return + None in eager mode and an op in graph mode. + + Args: + func: The function that implements the binary elementwise assert API. 
+ + Returns: + `func` + """ + _BINARY_ELEMENTWISE_ASSERT_APIS.append(func) + for args, handler in _ELEMENTWISE_API_HANDLERS.items(): + if len(args) == 3 and args[2] is _ASSERT_API_TAG: + _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler) + return func + + +def unary_elementwise_apis(): + """Returns a list of APIs that have been registered as unary elementwise.""" + return tuple(_UNARY_ELEMENTWISE_APIS) + + +def binary_elementwise_apis(): + """Returns a list of APIs that have been registered as binary elementwise.""" + return tuple(_BINARY_ELEMENTWISE_APIS) + + +def _add_dispatch_for_unary_elementwise_api(api, x_type, + elementwise_api_handler): + """Registers a unary elementwise handler as a dispatcher for a given API.""" + api_signature = tf_inspect.signature(api) + x_name = list(api_signature.parameters)[0] + name_index = _find_name_index(api_signature) + + need_to_bind_api_args = ( + len(api_signature.parameters) > 2 or + "name" not in api_signature.parameters) + + @dispatch_for_api(api, {x_name: x_type}) + def dispatch_target(*args, **kwargs): + args, kwargs, name = _extract_name_arg(args, kwargs, name_index) + if args: + x, args = args[0], args[1:] + else: + x = kwargs.pop(x_name) + + if need_to_bind_api_args: + tensor_api = lambda v: api(v, *args, **kwargs) + else: + tensor_api = api + + if name is None: + return elementwise_api_handler(tensor_api, x) + else: + with ops.name_scope(name, None, [x]): + return elementwise_api_handler(tensor_api, x) + + dispatch_target.__name__ = "elementwise_dispatch_target_for_" + api.__name__ + dispatch_target.__qualname__ = dispatch_target.__name__ + # Keep track of what targets we've registered (so we can unregister them). 
+ target_list = _ELEMENTWISE_API_TARGETS.setdefault((x_type,), []) + target_list.append((api, dispatch_target)) + + +def _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, + elementwise_api_handler): + """Registers a binary elementwise handler as a dispatcher for a given API.""" + api_signature = tf_inspect.signature(api) + x_name, y_name = list(api_signature.parameters)[:2] + name_index = _find_name_index(api_signature) + + need_to_bind_api_args = (len(api_signature.parameters) > 3 or + "name" not in api_signature.parameters) + + @dispatch_for_api(api, {x_name: x_type, y_name: y_type}) + def dispatch_target(*args, **kwargs): + args, kwargs, name = _extract_name_arg(args, kwargs, name_index) + if len(args) > 1: + x, y, args = args[0], args[1], args[2:] + elif args: + x, args = args[0], args[1:] + y = kwargs.pop(y_name, None) + else: + x = kwargs.pop(x_name, None) + y = kwargs.pop(y_name, None) + + if need_to_bind_api_args: + tensor_api = lambda v1, v2: api(v1, v2, *args, **kwargs) + else: + tensor_api = api + + if name is None: + return elementwise_api_handler(tensor_api, x, y) + else: + with ops.name_scope(name, None, [x, y]): + return elementwise_api_handler(tensor_api, x, y) + + dispatch_target.__name__ = "elementwise_dispatch_target_for_" + api.__name__ + dispatch_target.__qualname__ = dispatch_target.__name__ + # Keep track of what targets we've registered (so we can unregister them). 
+ target_list = _ELEMENTWISE_API_TARGETS.setdefault((x_type, y_type), []) + target_list.append((api, dispatch_target)) + + +def _find_name_index(signature): + """Returns the index of the `name` parameter, or -1 if it's not present.""" + try: + return list(signature.parameters).index("name") + except ValueError: + return -1 + + +def _extract_name_arg(args, kwargs, name_index): + """Extracts the parameter `name` and returns `(args, kwargs, name_value)`.""" + if name_index < 0: + name_value = None + elif name_index < len(args): + name_value = args[name_index] + args = args[:name_index] + args[name_index + 1:] + else: + name_value = kwargs.pop("name", None) + return args, kwargs, name_value + + +def update_docstrings_with_api_lists(): + """Updates the docstrings of dispatch decorators with API lists. + + Updates docstrings for `dispatch_for_api`, + `dispatch_for_unary_elementwise_apis`, and + `dispatch_for_binary_elementwise_apis`, by replacing the string '<>' + with a list of APIs that have been registered for that decorator. 
+ """ + _update_docstring_with_api_list(dispatch_for_unary_elementwise_apis, + _UNARY_ELEMENTWISE_APIS) + _update_docstring_with_api_list(dispatch_for_binary_elementwise_apis, + _BINARY_ELEMENTWISE_APIS) + _update_docstring_with_api_list(dispatch_for_binary_elementwise_assert_apis, + _BINARY_ELEMENTWISE_ASSERT_APIS) + _update_docstring_with_api_list(dispatch_for_api, + _TYPE_BASED_DISPATCH_SIGNATURES) + + +def _update_docstring_with_api_list(target, api_list): + """Replaces `<>` in target.__doc__ with the given list of APIs.""" + lines = [] + for func in api_list: + name = tf_export_lib.get_canonical_name_for_symbol( + func, add_prefix_to_v1_names=True) + if name is not None: + params = tf_inspect.signature(func).parameters.keys() + lines.append(f" * `tf.{name}({', '.join(params)})`") + lines.sort() + target.__doc__ = target.__doc__.replace(" <>", "\n".join(lines)) + + +################################################################################ +# Dispatch Support +################################################################################ +@tf_export("__internal__.dispatch.add_dispatch_support", v1=[]) +def add_dispatch_support(target=None, iterable_parameters=None): + """Decorator that adds a dispatch handling wrapper to a TensorFlow Python API. + + This wrapper adds the decorated function as an API that can be overridden + using the `@dispatch_for_api` decorator. In the following example, we first + define a new API (`double`) that supports dispatch, then define a custom type + (`MaskedTensor`) and finally use `dispatch_for_api` to override the default + implementation of `double` when called with `MaskedTensor` values: + + >>> @add_dispatch_support + ... def double(x): + ... return x * 2 + >>> class MaskedTensor(tf.experimental.ExtensionType): + ... values: tf.Tensor + ... mask: tf.Tensor + >>> @dispatch_for_api(double, {'x': MaskedTensor}) + ... def masked_double(x): + ... 
return MaskedTensor(x.values * 2, x.mask) + + The optional `iterable_parameters` argument can be used to mark parameters that + can take arbitrary iterable values (such as generator expressions). These + need to be handled specially during dispatch, since just iterating over an + iterable uses up its values. In the following example, we define a new API + whose second argument can be an iterable value; and then override the default + implementation of that API when the iterable contains MaskedTensors: + + >>> @add_dispatch_support(iterable_parameters=['ys']) + ... def add_tensor_to_list_of_tensors(x, ys): + ... return [x + y for y in ys] + >>> @dispatch_for_api(add_tensor_to_list_of_tensors, + ... {'ys': typing.List[MaskedTensor]}) + ... def masked_add_tensor_to_list_of_tensors(x, ys): + ... return [MaskedTensor(x+y.values, y.mask) for y in ys] + + (Note: the only TensorFlow API that currently supports iterables is `add_n`.) + + Args: + target: The TensorFlow API that should support dispatch. + iterable_parameters: Optional list of parameter names that may be called + with iterables (such as the `inputs` parameter for `tf.add_n`). + + Returns: + A decorator. + """ + + if not (iterable_parameters is None or + (isinstance(iterable_parameters, (list, tuple)) and + all(isinstance(p, str) for p in iterable_parameters))): + raise TypeError("iterable_parameters should be a list or tuple of string.") + + def decorator(dispatch_target): + + # Get the name & index for each iterable parameter. 
+ if iterable_parameters is None: + iterable_params = None + else: + arg_names = tf_inspect.getargspec(dispatch_target).args + iterable_params = [ + (name, arg_names.index(name)) for name in iterable_parameters + ] + + @traceback_utils.filter_traceback + def op_dispatch_handler(*args, **kwargs): + """Call `dispatch_target`, performing dispatch when appropriate.""" + + # Type-based dispatch system (dispatch v2): + if api_dispatcher is not None: + if iterable_params is not None: + args, kwargs = replace_iterable_params(args, kwargs, iterable_params) + result = api_dispatcher.Dispatch(args, kwargs) + if result is not NotImplemented: + return result + + # Fallback dispatch system (dispatch v1): + try: + return dispatch_target(*args, **kwargs) + except (TypeError, ValueError): + # Note: convert_to_eager_tensor currently raises a ValueError, not a + # TypeError, when given unexpected types. So we need to catch both. + result = dispatch(op_dispatch_handler, args, kwargs) + if result is not OpDispatcher.NOT_SUPPORTED: + return result + else: + raise + + add_fallback_dispatch_list(op_dispatch_handler) + op_dispatch_handler = tf_decorator.make_decorator(dispatch_target, + op_dispatch_handler) + add_type_based_api_dispatcher(op_dispatch_handler) + api_dispatcher = getattr(op_dispatch_handler, TYPE_BASED_DISPATCH_ATTR, + None) + return op_dispatch_handler + + if target is None: + return decorator + else: + return decorator(target) + + +def replace_iterable_params(args, kwargs, iterable_params): + """Returns (args, kwargs) with any iterable parameters converted to lists. + + Args: + args: Positional arguments to a function + kwargs: Keyword arguments to a function. + iterable_params: A list of (name, index) tuples for iterable parameters. + + Returns: + A tuple (args, kwargs), where any positional or keyword parameters in + `iterable_params` have their value converted to a `list`. 
+ """ + args = list(args) + for name, index in iterable_params: + if index < len(args): + args[index] = list(args[index]) + elif name in kwargs: + kwargs[name] = list(kwargs[name]) + return tuple(args), kwargs diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbc0e66169971ee921cf682adb9c2524fe14f30 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/example_parser_configuration.py @@ -0,0 +1,206 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Extract parse_example op configuration to a proto.""" + +from tensorflow.core.example import example_parser_configuration_pb2 +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util + + +def extract_example_parser_configuration(parse_example_op, sess): + """Returns an ExampleParserConfig proto. + + Args: + parse_example_op: A ParseExample or ParseExampleV2 `Operation` + sess: A tf.compat.v1.Session needed to obtain some configuration values. + Returns: + A ExampleParserConfig proto. + + Raises: + ValueError: If attributes are inconsistent. 
+ """ + if parse_example_op.type == "ParseExample": + return _extract_from_parse_example(parse_example_op, sess) + elif parse_example_op.type == "ParseExampleV2": + return _extract_from_parse_example_v2(parse_example_op, sess) + else: + raise ValueError( + "Found unexpected type when parsing example. Expected `ParseExample` " + f"object. Received type: {parse_example_op.type}") + + +def _extract_from_parse_example(parse_example_op, sess): + """Extract ExampleParserConfig from ParseExample op.""" + config = example_parser_configuration_pb2.ExampleParserConfiguration() + + num_sparse = parse_example_op.get_attr("Nsparse") + num_dense = parse_example_op.get_attr("Ndense") + total_features = num_dense + num_sparse + + sparse_types = parse_example_op.get_attr("sparse_types") + dense_types = parse_example_op.get_attr("Tdense") + dense_shapes = parse_example_op.get_attr("dense_shapes") + + if len(sparse_types) != num_sparse: + raise ValueError("len(sparse_types) attribute does not match " + "Nsparse attribute (%d vs %d)" % + (len(sparse_types), num_sparse)) + + if len(dense_types) != num_dense: + raise ValueError("len(dense_types) attribute does not match " + "Ndense attribute (%d vs %d)" % + (len(dense_types), num_dense)) + + if len(dense_shapes) != num_dense: + raise ValueError("len(dense_shapes) attribute does not match " + "Ndense attribute (%d vs %d)" % + (len(dense_shapes), num_dense)) + + # Skip over the serialized input, and the names input. + fetch_list = parse_example_op.inputs[2:] + + # Fetch total_features key names and num_dense default values. + if len(fetch_list) != (total_features + num_dense): + raise ValueError("len(fetch_list) does not match total features + " + "num_dense (%d vs %d)" % + (len(fetch_list), (total_features + num_dense))) + + fetched = sess.run(fetch_list) + + if len(fetched) != len(fetch_list): + raise ValueError("len(fetched) does not match len(fetch_list) " + "(%d vs %d)" % (len(fetched), len(fetch_list))) + + # Fetch indices. 
+ sparse_keys_start = 0 + dense_keys_start = sparse_keys_start + num_sparse + dense_def_start = dense_keys_start + num_dense + + # Output tensor indices. + sparse_indices_start = 0 + sparse_values_start = num_sparse + sparse_shapes_start = sparse_values_start + num_sparse + dense_values_start = sparse_shapes_start + num_sparse + + # Dense features. + for i in range(num_dense): + key = fetched[dense_keys_start + i] + feature_config = config.feature_map[key] + # Convert the default value numpy array fetched from the session run + # into a TensorProto. + fixed_config = feature_config.fixed_len_feature + + fixed_config.default_value.CopyFrom( + tensor_util.make_tensor_proto(fetched[dense_def_start + i])) + # Convert the shape from the attributes + # into a TensorShapeProto. + fixed_config.shape.CopyFrom( + tensor_shape.TensorShape(dense_shapes[i]).as_proto()) + + fixed_config.dtype = dense_types[i].as_datatype_enum + # Get the output tensor name. + fixed_config.values_output_tensor_name = parse_example_op.outputs[ + dense_values_start + i].name + + # Sparse features. 
+ for i in range(num_sparse): + key = fetched[sparse_keys_start + i] + feature_config = config.feature_map[key] + var_len_feature = feature_config.var_len_feature + var_len_feature.dtype = sparse_types[i].as_datatype_enum + var_len_feature.indices_output_tensor_name = parse_example_op.outputs[ + sparse_indices_start + i].name + var_len_feature.values_output_tensor_name = parse_example_op.outputs[ + sparse_values_start + i].name + var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[ + sparse_shapes_start + i].name + + return config + + +def _extract_from_parse_example_v2(parse_example_op, sess): + """Extract ExampleParserConfig from ParseExampleV2 op.""" + config = example_parser_configuration_pb2.ExampleParserConfiguration() + + dense_types = parse_example_op.get_attr("Tdense") + num_sparse = parse_example_op.get_attr("num_sparse") + sparse_types = parse_example_op.get_attr("sparse_types") + ragged_value_types = parse_example_op.get_attr("ragged_value_types") + ragged_split_types = parse_example_op.get_attr("ragged_split_types") + dense_shapes = parse_example_op.get_attr("dense_shapes") + + num_dense = len(dense_types) + num_ragged = len(ragged_value_types) + assert len(ragged_value_types) == len(ragged_split_types) + assert len(parse_example_op.inputs) == 5 + num_dense + + # Skip over the serialized input, and the names input. + fetched = sess.run(parse_example_op.inputs[2:]) + sparse_keys = fetched[0].tolist() + dense_keys = fetched[1].tolist() + ragged_keys = fetched[2].tolist() + dense_defaults = fetched[3:] + assert len(sparse_keys) == num_sparse + assert len(dense_keys) == num_dense + assert len(ragged_keys) == num_ragged + + # Output tensor indices. 
+ sparse_indices_start = 0 + sparse_values_start = num_sparse + sparse_shapes_start = sparse_values_start + num_sparse + dense_values_start = sparse_shapes_start + num_sparse + ragged_values_start = dense_values_start + num_dense + ragged_row_splits_start = ragged_values_start + num_ragged + + # Dense features. + for i in range(num_dense): + key = dense_keys[i] + feature_config = config.feature_map[key] + # Convert the default value numpy array fetched from the session run + # into a TensorProto. + fixed_config = feature_config.fixed_len_feature + + fixed_config.default_value.CopyFrom( + tensor_util.make_tensor_proto(dense_defaults[i])) + # Convert the shape from the attributes + # into a TensorShapeProto. + fixed_config.shape.CopyFrom( + tensor_shape.TensorShape(dense_shapes[i]).as_proto()) + + fixed_config.dtype = dense_types[i].as_datatype_enum + # Get the output tensor name. + fixed_config.values_output_tensor_name = parse_example_op.outputs[ + dense_values_start + i].name + + # Sparse features. 
+ for i in range(num_sparse): + key = sparse_keys[i] + feature_config = config.feature_map[key] + var_len_feature = feature_config.var_len_feature + var_len_feature.dtype = sparse_types[i].as_datatype_enum + var_len_feature.indices_output_tensor_name = parse_example_op.outputs[ + sparse_indices_start + i].name + var_len_feature.values_output_tensor_name = parse_example_op.outputs[ + sparse_values_start + i].name + var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[ + sparse_shapes_start + i].name + + if num_ragged != 0: + del ragged_values_start # unused + del ragged_row_splits_start # unused + raise ValueError("Ragged features are not yet supported by " + "example_parser_configuration.proto") + + return config diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a04c7f7468b5695b2140450c76ca2e02095c94d1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/fast_module_type.pyi @@ -0,0 +1,16 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def get_fast_module_type_class() -> object: ... 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..743a81343240c1085e0904348c02c3025f81f4cd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/function_utils.py @@ -0,0 +1,132 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utility to retrieve function args.""" + +import functools + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_inspect + + +def _is_bound_method(fn): + _, fn = tf_decorator.unwrap(fn) + return tf_inspect.ismethod(fn) and (fn.__self__ is not None) + + +def _is_callable_object(obj): + return hasattr(obj, '__call__') and tf_inspect.ismethod(obj.__call__) + + +def fn_args(fn): + """Get argument names for function-like object. + + Args: + fn: Function, or function-like object (e.g., result of `functools.partial`). + + Returns: + `tuple` of string argument names. 
+ + Raises: + ValueError: if partial function has positionally bound arguments + """ + if isinstance(fn, functools.partial): + args = fn_args(fn.func) + args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])] + else: + if _is_callable_object(fn): + fn = fn.__call__ + args = tf_inspect.getfullargspec(fn).args + if _is_bound_method(fn) and args: + # If it's a bound method, it may or may not have a self/cls first + # argument; for example, self could be captured in *args. + # If it does have a positional argument, it is self/cls. + args.pop(0) + return tuple(args) + + +def has_kwargs(fn): + """Returns whether the passed callable has **kwargs in its signature. + + Args: + fn: Function, or function-like object (e.g., result of `functools.partial`). + + Returns: + `bool`: if `fn` has **kwargs in its signature. + + Raises: + `TypeError`: If fn is not a Function, or function-like object. + """ + if isinstance(fn, functools.partial): + fn = fn.func + elif _is_callable_object(fn): + fn = fn.__call__ + elif not callable(fn): + raise TypeError( + 'Argument `fn` should be a callable. ' + f'Received: fn={fn} (of type {type(fn)})') + return tf_inspect.getfullargspec(fn).varkw is not None + + +def get_func_name(func): + """Returns name of passed callable.""" + _, func = tf_decorator.unwrap(func) + if callable(func): + if tf_inspect.isfunction(func): + return func.__name__ + elif tf_inspect.ismethod(func): + return '%s.%s' % ( + func.__self__.__class__.__name__, + func.__func__.__name__, + ) + else: # Probably a class instance with __call__ + return str(type(func)) + else: + raise ValueError( + 'Argument `func` must be a callable. 
' + f'Received func={func} (of type {type(func)})') + + +def get_func_code(func): + """Returns func_code of passed callable, or None if not available.""" + _, func = tf_decorator.unwrap(func) + if callable(func): + if tf_inspect.isfunction(func) or tf_inspect.ismethod(func): + return func.__code__ + # Since the object is not a function or method, but is a callable, we will + # try to access the __call__method as a function. This works with callable + # classes but fails with functool.partial objects despite their __call__ + # attribute. + try: + return func.__call__.__code__ + except AttributeError: + return None + else: + raise ValueError( + 'Argument `func` must be a callable. ' + f'Received func={func} (of type {type(func)})') + + +_rewriter_config_optimizer_disabled = None + + +def get_disabled_rewriter_config(): + global _rewriter_config_optimizer_disabled + if _rewriter_config_optimizer_disabled is None: + config = config_pb2.ConfigProto() + rewriter_config = config.graph_options.rewrite_options + rewriter_config.disable_meta_optimizer = True + _rewriter_config_optimizer_disabled = config.SerializeToString() + return _rewriter_config_optimizer_disabled diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py new file mode 100644 index 0000000000000000000000000000000000000000..8ab710f4b867b48c11245e8764af7ecec7f1b342 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/keras_deps.py @@ -0,0 +1,80 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Interface that provides access to Keras dependencies.

This library is a common interface that contains Keras functions needed by
TensorFlow and TensorFlow Lite and is required as per the dependency inversion
principle (https://en.wikipedia.org/wiki/Dependency_inversion_principle). As per
this principle, high-level modules (eg: TensorFlow and TensorFlow Lite) should
not depend on low-level modules (eg: Keras) and instead both should depend on a
common interface such as this file.
"""


from tensorflow.python.util.tf_export import tf_export

# Registry slots. Keras fills these in at its own import time via the
# `register_*` functions below; TF/TFLite read them back through the `get_*`
# accessors without ever importing Keras directly.
_KERAS_CALL_CONTEXT_FUNCTION = None
_KERAS_CLEAR_SESSION_FUNCTION = None
_KERAS_GET_SESSION_FUNCTION = None
_KERAS_LOAD_MODEL_FUNCTION = None

# TODO(b/169898786): Use the Keras public API when TFLite moves out of TF


# Register functions
@tf_export('__internal__.register_call_context_function', v1=[])
def register_call_context_function(func):
  """Registers `func` as the Keras call-context accessor."""
  global _KERAS_CALL_CONTEXT_FUNCTION
  _KERAS_CALL_CONTEXT_FUNCTION = func


@tf_export('__internal__.register_clear_session_function', v1=[])
def register_clear_session_function(func):
  """Registers `func` as the Keras clear-session implementation."""
  global _KERAS_CLEAR_SESSION_FUNCTION
  _KERAS_CLEAR_SESSION_FUNCTION = func


@tf_export('__internal__.register_get_session_function', v1=[])
def register_get_session_function(func):
  """Registers `func` as the Keras get-session implementation."""
  global _KERAS_GET_SESSION_FUNCTION
  _KERAS_GET_SESSION_FUNCTION = func


@tf_export('__internal__.register_load_model_function', v1=[])
def register_load_model_function(func):
  """Registers `func` as the Keras load-model implementation."""
  global _KERAS_LOAD_MODEL_FUNCTION
  _KERAS_LOAD_MODEL_FUNCTION = func


# Get functions.
# Reading a module global needs no `global` declaration, so the redundant
# statements the original accessors carried have been dropped.
def get_call_context_function():
  """Returns the registered call-context function, or None."""
  return _KERAS_CALL_CONTEXT_FUNCTION


def get_clear_session_function():
  """Returns the registered clear-session function, or None."""
  return _KERAS_CLEAR_SESSION_FUNCTION


def get_get_session_function():
  """Returns the registered get-session function, or None."""
  return _KERAS_GET_SESSION_FUNCTION


def get_load_model_function():
  """Returns the registered load-model function, or None."""
  return _KERAS_LOAD_MODEL_FUNCTION
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Keyword args functions."""

import functools

from tensorflow.python.util import decorator_utils


def keyword_args_only(func):
  """Decorator for marking specific function accepting keyword args only.

  This decorator raises a `ValueError` if the input `func` is called with any
  non-keyword args. This prevents the caller from providing the arguments in
  wrong order.

  Args:
    func: The function or method needed to be decorated.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If `func` is not callable.
  """

  decorator_utils.validate_callable(func, "keyword_args_only")

  @functools.wraps(func)
  def new_func(*args, **kwargs):
    """Keyword args only wrapper."""
    if args:
      raise ValueError(
          f"The function {func.__name__} only accepts keyword arguments. "
          "Do not pass positional arguments. Received the following positional "
          f"arguments: {args}")
    return func(**kwargs)
  return new_func
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A LazyLoader class."""

import importlib
import os
import types

# NOTE(review): guarded so this module stays importable (and testable) without
# the TensorFlow runtime. Inside TF the first import always succeeds, so
# production behavior is unchanged; stdlib `logging` exposes a compatible
# module-level `warning()`.
try:
  from tensorflow.python.platform import tf_logging as logging
except ImportError:
  import logging

# All private attributes of the loaders below carry this prefix so that
# `__setattr__`/`__delattr__` can tell loader-internal state apart from
# attributes that must be forwarded to the lazily loaded module.
_TENSORFLOW_LAZY_LOADER_PREFIX = "_tfll"


class LazyLoader(types.ModuleType):
  """Lazily import a module, mainly to avoid pulling in large dependencies.

  `contrib`, and `ffmpeg` are examples of modules that are large and not always
  needed, and this allows them to only be loaded when they are used.
  """

  # The lint error here is incorrect.
  def __init__(self, local_name, parent_module_globals, name, warning=None):
    """Creates a module proxy for `name`.

    Args:
      local_name: name under which the loaded module is inserted into
        `parent_module_globals`.
      parent_module_globals: the `globals()` dict of the importing module.
      name: fully qualified name of the module to import lazily.
      warning: optional message logged once upon the first actual load.
    """
    self._tfll_local_name = local_name
    self._tfll_parent_module_globals = parent_module_globals
    self._tfll_warning = warning

    # These members allow doctest to correctly process this module member
    # without triggering self._load(). self._load() mutates
    # parent_module_globals and triggers a dict-mutated-during-iteration error
    # from doctest.py.
    # - for from_module()
    super().__setattr__("__module__", name.rsplit(".", 1)[0])
    # - for is_routine()
    super().__setattr__("__wrapped__", None)

    super().__init__(name)

  def _load(self):
    """Load the module and insert it into the parent's globals."""
    # Import the target module and insert it into the parent's namespace
    module = importlib.import_module(self.__name__)
    self._tfll_parent_module_globals[self._tfll_local_name] = module

    # Emit a warning if one was specified
    if self._tfll_warning:
      logging.warning(self._tfll_warning)
      # Make sure to only warn once.
      self._tfll_warning = None

    # Update this object's dict so that if someone keeps a reference to the
    # LazyLoader, lookups are efficient (__getattr__ is only called on lookups
    # that fail).
    self.__dict__.update(module.__dict__)

    return module

  def __getattr__(self, name):
    module = self._load()
    return getattr(module, name)

  def __setattr__(self, name, value):
    # Loader-internal state is stored on the proxy itself; everything else is
    # forwarded to the real module (loading it if necessary).
    if name.startswith(_TENSORFLOW_LAZY_LOADER_PREFIX):
      super().__setattr__(name, value)
    else:
      module = self._load()
      setattr(module, name, value)
      self.__dict__[name] = value
      try:
        # check if the module has __all__
        if name not in self.__all__ and name != "__all__":
          self.__all__.append(name)
      except AttributeError:
        pass

  def __delattr__(self, name):
    if name.startswith(_TENSORFLOW_LAZY_LOADER_PREFIX):
      super().__delattr__(name)
    else:
      module = self._load()
      delattr(module, name)
      self.__dict__.pop(name)
      try:
        # check if the module has __all__
        if name in self.__all__:
          self.__all__.remove(name)
      except AttributeError:
        pass

  def __repr__(self):
    # Carefully to not trigger _load, since repr may be called in very
    # sensitive places. BUG FIX: the repr body had been garbled to an empty
    # string literal; restore an informative, load-free representation.
    return f"<LazyLoader {self.__name__} as {self._tfll_local_name}>"

  def __dir__(self):
    module = self._load()
    return dir(module)

  def __reduce__(self):
    # Pickle as "re-import by name" rather than trying to pickle module state.
    return importlib.import_module, (self.__name__,)


class KerasLazyLoader(LazyLoader):
  """LazyLoader that handles routing to different Keras version."""

  def __init__(  # pylint: disable=super-init-not-called
      self, parent_module_globals, mode=None, submodule=None, name="keras"):
    self._tfll_parent_module_globals = parent_module_globals
    self._tfll_mode = mode
    self._tfll_submodule = submodule
    self._tfll_name = name
    self._tfll_initialized = False

  def _initialize(self):
    """Resolve the Keras version to use and initialize the loader."""
    self._tfll_initialized = True
    package_name = None
    keras_version = None
    if os.environ.get("TF_USE_LEGACY_KERAS", None) in ("true", "True", "1"):
      try:
        import tf_keras  # pylint: disable=g-import-not-at-top,unused-import

        keras_version = "tf_keras"
        if self._tfll_mode == "v1":
          package_name = "tf_keras.api._v1.keras"
        else:
          package_name = "tf_keras.api._v2.keras"
      except ImportError:
        logging.warning(
            "Your environment has TF_USE_LEGACY_KERAS set to True, but you "
            "do not have the tf_keras package installed. You must install it "
            "in order to use the legacy tf.keras. Install it via: "
            "`pip install tf_keras`"
        )
    else:
      try:
        import keras  # pylint: disable=g-import-not-at-top

        if keras.__version__.startswith("3."):
          # This is the Keras 3.x case.
          keras_version = "keras_3"
          package_name = "keras._tf_keras.keras"
        else:
          # This is the Keras 2.x case.
          keras_version = "keras_2"
          if self._tfll_mode == "v1":
            package_name = "keras.api._v1.keras"
          else:
            package_name = "keras.api._v2.keras"
      except ImportError:
        raise ImportError(  # pylint: disable=raise-missing-from
            "Keras cannot be imported. Check that it is installed."
        )

    self._tfll_keras_version = keras_version
    if keras_version is not None:
      if self._tfll_submodule is not None:
        package_name += "." + self._tfll_submodule
      super().__init__(
          self._tfll_name, self._tfll_parent_module_globals, package_name
      )
    else:
      raise ImportError(  # pylint: disable=raise-missing-from
          "Keras cannot be imported. Check that it is installed."
      )

  def __getattr__(self, item):
    if item in ("_tfll_mode", "_tfll_initialized", "_tfll_name"):
      return super(types.ModuleType, self).__getattribute__(item)
    if not self._tfll_initialized:
      self._initialize()
    if self._tfll_keras_version == "keras_3":
      if (
          self._tfll_mode == "v1"
          and not self._tfll_submodule
          and item.startswith("compat.v1.")
      ):
        raise AttributeError(
            "`tf.compat.v1.keras` is not available with Keras 3. Keras 3 has "
            "no support for TF 1 APIs. You can install the `tf_keras` package "
            "as an alternative, and set the environment variable "
            "`TF_USE_LEGACY_KERAS=True` to configure TensorFlow to route "
            "`tf.compat.v1.keras` to `tf_keras`."
        )
      elif (
          self._tfll_mode == "v2"
          and not self._tfll_submodule
          and item.startswith("compat.v2.")
      ):
        raise AttributeError(
            "`tf.compat.v2.keras` is not available with Keras 3. Just use "
            "`import keras` instead."
        )
      elif self._tfll_submodule and self._tfll_submodule.startswith(
          "__internal__.legacy."
      ):
        raise AttributeError(
            f"`{item}` is not available with Keras 3."
        )
    module = self._load()
    return getattr(module, item)

  def __repr__(self):
    # BUG FIX: both return values had been garbled to empty string literals;
    # restore informative representations (no _load / _initialize triggered).
    if self._tfll_initialized:
      return (
          f"<KerasLazyLoader ({self._tfll_keras_version}) "
          f"{self.__name__} as {self._tfll_local_name} mode={self._tfll_mode}>"
      )
    return "<KerasLazyLoader>"

  def __dir__(self):
    if not self._tfll_initialized:
      self._initialize()
    return super().__dir__()
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locking related utils."""

import threading
class GroupLock(object):
  """A lock that admits many members of one group while excluding all others.

  Threads are partitioned into `num_groups` logical groups. Any number of
  threads belonging to the same group may hold the lock at once, but while any
  member of group `g` holds it, members of every other group block. This
  generalizes a reader-writer lock (two groups, with multiple "writers"
  allowed) to an arbitrary number of groups.

  Typical usage with two groups sharing one resource:

  ```python
  lock = GroupLock(num_groups=2)

  # In a member of group 0:
  with lock.group(0):
    ...  # access the resource

  # In a member of group 1:
  with lock.group(1):
    ...  # access the resource
  ```

  Using `group(group_id)` as a context manager is the easiest way; the
  `acquire` and `release` methods are also available for manual control.
  """

  __slots__ = ["_ready", "_num_groups", "_group_member_counts"]

  def __init__(self, num_groups=2):
    """Creates the lock for `num_groups` groups.

    Args:
      num_groups: Number of thread groups that will contend for the resource;
        must be a positive integer.

    Raises:
      ValueError: If num_groups is less than 1.
    """
    if num_groups < 1:
      raise ValueError(
          "Argument `num_groups` must be a positive integer. "
          f"Received: num_groups={num_groups}")
    self._ready = threading.Condition(threading.Lock())
    self._num_groups = num_groups
    self._group_member_counts = [0] * num_groups

  def group(self, group_id):
    """Returns a context manager that holds the lock on behalf of `group_id`.

    Args:
      group_id: The group for which to acquire and release the lock.

    Returns:
      A context manager which will acquire the lock for `group_id`.
    """
    self._validate_group_id(group_id)
    return GroupLock._Context(self, group_id)

  def acquire(self, group_id):
    """Blocks until no other group is active, then joins `group_id`."""
    self._validate_group_id(group_id)
    with self._ready:
      # wait_for re-checks the predicate each time the condition is notified.
      self._ready.wait_for(lambda: not self._another_group_active(group_id))
      self._group_member_counts[group_id] += 1

  def release(self, group_id):
    """Leaves `group_id`; wakes all waiters once the group drains to zero."""
    self._validate_group_id(group_id)
    with self._ready:
      self._group_member_counts[group_id] -= 1
      if self._group_member_counts[group_id] == 0:
        self._ready.notify_all()

  def _another_group_active(self, group_id):
    # True when some group other than `group_id` currently holds the lock.
    counts = self._group_member_counts
    return any(
        count > 0 for idx, count in enumerate(counts) if idx != group_id)

  def _validate_group_id(self, group_id):
    if group_id < 0 or group_id >= self._num_groups:
      raise ValueError(
          "Argument `group_id` should verify `0 <= group_id < num_groups` "
          f"(with `num_groups={self._num_groups}`). "
          f"Received: group_id={group_id}")

  class _Context(object):
    """Context-manager helper binding a GroupLock to a single group id."""

    __slots__ = ["_lock", "_group_id"]

    def __init__(self, lock, group_id):
      self._lock = lock
      self._group_id = group_id

    def __enter__(self):
      self._lock.acquire(self._group_id)

    def __exit__(self, type_arg, value_arg, traceback_arg):
      del type_arg, value_arg, traceback_arg
      self._lock.release(self._group_id)
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utility methods for handling nests. + +This module encapsulates different semantics of handling nests by the public +tf.nest APIs and internal tf.data APIs. The difference in semantics exists for +historic reasons and reconciliation would require a non-backwards compatible +change. + +The implementation of the different semantics use a common utility to +avoid / minimize further divergence between the two APIs over time. 
+""" + +import collections as _collections +import enum + +import wrapt as _wrapt + +from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import +from tensorflow.python.platform import tf_logging +from tensorflow.python.util import _pywrap_utils +from tensorflow.python.util.compat import collections_abc as _collections_abc +from tensorflow.python.util.custom_nest_protocol import CustomNestProtocol + + +_is_mapping_view = _pywrap_utils.IsMappingView +_is_attrs = _pywrap_utils.IsAttrs +_is_composite_tensor = _pywrap_utils.IsCompositeTensor +_is_type_spec = _pywrap_utils.IsTypeSpec +_is_mutable_mapping = _pywrap_utils.IsMutableMapping +_is_mapping = _pywrap_utils.IsMapping +_tf_data_is_nested = _pywrap_utils.IsNestedForData +_tf_data_flatten = _pywrap_utils.FlattenForData +_tf_core_is_nested = _pywrap_utils.IsNested +_is_nested_or_composite = _pywrap_utils.IsNestedOrComposite +# See the swig file (util.i) for documentation. +same_namedtuples = _pywrap_utils.SameNamedtuples + + +STRUCTURES_HAVE_MISMATCHING_TYPES = ( + "The two structures don't have the same sequence type. Input structure has " + "type {input_type}, while shallow structure has type {shallow_type}." +) + +STRUCTURES_HAVE_MISMATCHING_LENGTHS = ( + "The two structures don't have the same sequence length. Input " + "structure has length {input_length}, while shallow structure has length " + "{shallow_length}." +) + +INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = ( + "The input_tree has fewer items than the shallow_tree. Input structure " + "has length {input_size}, while shallow structure has length " + "{shallow_size}." +) + +SHALLOW_TREE_HAS_INVALID_KEYS = ( + "The shallow_tree's keys are not a subset of the input_tree's keys. The " + "shallow_tree has the following keys that are not in the input_tree: {}." +) + + +class Modality(enum.Enum): + """Modality/semantic used for treating nested structures. + + - Modality.CORE follows tensorflow_core/tf.nest semantics. 
+ + The following collection types are recognized by `tf.nest` as nested + structures: + + * `collections.abc.Sequence` (except `string` and `bytes`). + This includes `list`, `tuple`, and `namedtuple`. + * `collections.abc.Mapping` (with sortable keys). + This includes `dict` and `collections.OrderedDict`. + * `collections.abc.MappingView` (with sortable keys). + * [`attr.s` classes](https://www.attrs.org/). + + Any other values are considered **atoms**. Not all collection types are + considered nested structures. For example, the following types are + considered atoms: + + * `set`; `{"a", "b"}` is an atom, while `["a", "b"]` is a nested structure. + * [`dataclass` classes](https://docs.python.org/library/dataclasses.html) + * `tf.Tensor` + * `numpy.array` + + - Modality.DATA follows tf.data's nest semantics. + + This modality makes two changes: + 1. It removes support for lists as a level of nesting in nested structures. + 2. It adds support for `SparseTensorValue` as an atomic element. + + The motivation for this change is twofold: + + 1. It seems more natural for lists to be treated (e.g. in Dataset + constructors) + as tensors, rather than lists of (lists of...) tensors. + 2. This is needed because `SparseTensorValue` is implemented as a `namedtuple` + that would normally be flattened and we want to be able to create sparse + tensor from `SparseTensorValue's similarly to creating tensors from numpy + arrays. + """ + + CORE = "CORE" + DATA = "DATA" + + +class _DotString(object): + __slots__ = [] + + def __str__(self): + return "." + + def __repr__(self): + return "." + + +_DOT = _DotString() + + +def is_nested(modality, structure): + """Returns true if its input is a nested structure. + + For Modality.CORE refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a nested structure. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: the value to test. 
+ + Returns: + True if the input is a nested structure. + """ + if modality == Modality.CORE: + return _tf_core_is_nested(structure) + elif modality == Modality.DATA: + return _tf_data_is_nested(structure) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# TODO(b/225045380): Move to a "leaf" library to use in trace_type. +def is_namedtuple(instance, strict=False): + """Returns True iff `instance` is a `namedtuple`. + + Args: + instance: An instance of a Python object. + strict: If True, `instance` is considered to be a `namedtuple` only if it is + a "plain" namedtuple. For instance, a class inheriting from a `namedtuple` + will be considered to be a `namedtuple` iff `strict=False`. + + Returns: + True if `instance` is a `namedtuple`. + """ + return _pywrap_utils.IsNamedtuple(instance, strict) + + +def sequence_like(instance, args): + """Converts the sequence `args` to the same type as `instance`. + + Args: + instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, + `collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or + `type_spec.TypeSpec`. + args: items to be converted to the `instance` type. + + Returns: + `args` with the type of `instance`. + """ + if _is_mutable_mapping(instance): + # Pack dictionaries in a deterministic order by sorting the keys. + # Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). 
+ result = dict(zip(_tf_core_sorted(instance), args)) + instance_type = type(instance) + if instance_type == _collections.defaultdict: + d = _collections.defaultdict(instance.default_factory) + else: + d = instance_type() + for key in instance: + d[key] = result[key] + return d + elif _is_mapping(instance): + result = dict(zip(_tf_core_sorted(instance), args)) + instance_type = type(instance) + if not getattr(instance_type, "__supported_by_tf_nest__", False): + tf_logging.log_first_n( + tf_logging.WARN, + "Mapping types may not work well with tf.nest. " + "Prefer using MutableMapping for {}".format(instance_type), + 1, + ) + try: + return instance_type((key, result[key]) for key in instance) + except TypeError as err: + # pylint: disable=raise-missing-from + raise TypeError( + "Error creating an object of type {} like {}. Note that " + "it must accept a single positional argument " + "representing an iterable of key-value pairs, in " + "addition to self. Cause: {}".format(type(instance), instance, err) + ) + elif _is_mapping_view(instance): + # We can't directly construct mapping views, so we create a list instead + return list(args) + elif is_namedtuple(instance) or _is_attrs(instance): + if isinstance(instance, _wrapt.ObjectProxy): + instance_type = type(instance.__wrapped__) + else: + instance_type = type(instance) + return instance_type(*args) + elif _is_composite_tensor(instance): + assert len(args) == 1 + spec = instance._type_spec # pylint: disable=protected-access + return spec._from_components(args[0]) # pylint: disable=protected-access + elif _is_type_spec(instance): + # Pack a CompositeTensor's components according to a TypeSpec. 
+ assert len(args) == 1 + return instance._from_components(args[0]) # pylint: disable=protected-access + elif isinstance(instance, range): + return sequence_like(list(instance), args) + elif isinstance(instance, _wrapt.ObjectProxy): + # For object proxies, first create the underlying type and then re-wrap it + # in the proxy type. + return type(instance)(sequence_like(instance.__wrapped__, args)) + elif isinstance(instance, CustomNestProtocol): + metadata = instance.__tf_flatten__()[0] + return instance.__tf_unflatten__(metadata, tuple(args)) + else: + # Not a namedtuple + return type(instance)(args) + + +def _get_attrs_items(obj): + """Returns a list of (name, value) pairs from an attrs instance. + + TODO(b/268078256): check if this comment is valid, and if so, ensure it's + handled in the function below. + The list will be sorted by name. + + Args: + obj: an object. + + Returns: + A list of (attr_name, attr_value) pairs, sorted by attr_name. + """ + attrs = getattr(obj.__class__, "__attrs_attrs__") + attr_names = (a.name for a in attrs) + return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names] + + +def _tf_core_sorted(dict_): + """Returns a sorted list of the dict keys, with error if keys not sortable.""" + try: + return sorted(dict_.keys()) + except TypeError: + # pylint: disable=raise-missing-from + raise TypeError("nest only supports dicts with sortable keys.") + + +def _tf_data_sorted(dict_): + """Returns a sorted list of the dict keys, with error if keys not sortable.""" + try: + return sorted(list(dict_)) + except TypeError as e: + # pylint: disable=raise-missing-from + raise TypeError( + f"nest only supports dicts with sortable keys. Error: {e.message}" + ) + + +def yield_value(modality, iterable): + """Yield elements of `iterable` in a deterministic order. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + iterable: an iterable. + + Yields: + The iterable elements in a deterministic order. 
+ """ + if modality == Modality.CORE: + yield from _tf_core_yield_value(iterable) + elif modality == Modality.DATA: + yield from _tf_data_yield_value(iterable) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_value(iterable): + for _, v in _tf_core_yield_sorted_items(iterable): + yield v + + +def yield_sorted_items(modality, iterable): + if modality == Modality.CORE: + return _tf_core_yield_sorted_items(iterable) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_sorted_items(iterable): + """Yield (key, value) pairs for `iterable` in a deterministic order. + + For Sequences, the key will be an int, the array index of a value. + For Mappings, the key will be the dictionary key. + For objects (e.g. namedtuples), the key will be the attribute name. + + In all cases, the keys will be iterated in sorted order. + + Args: + iterable: an iterable. + + Yields: + The iterable's (key, value) pairs, in order of sorted keys. + """ + # Ordered to check common structure types (list, tuple, dict) first. + if isinstance(iterable, list): + for item in enumerate(iterable): + yield item + # namedtuples handled separately to avoid expensive namedtuple check. + elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck + for item in enumerate(iterable): + yield item + elif isinstance(iterable, (dict, _collections_abc.Mapping)): + # Iterate through dictionaries in a deterministic order by sorting the + # keys. Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). 
+ for key in _tf_core_sorted(iterable): + yield key, iterable[key] + elif _is_attrs(iterable): + for item in _get_attrs_items(iterable): + yield item + elif is_namedtuple(iterable): + for field in iterable._fields: + yield field, getattr(iterable, field) + elif _is_composite_tensor(iterable): + type_spec = iterable._type_spec # pylint: disable=protected-access + yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access + elif _is_type_spec(iterable): + # Note: to allow CompositeTensors and their TypeSpecs to have matching + # structures, we need to use the same key string here. + yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access + elif isinstance(iterable, CustomNestProtocol): + flat_component = iterable.__tf_flatten__()[1] + assert isinstance(flat_component, tuple) + yield from enumerate(flat_component) + else: + for item in enumerate(iterable): + yield item + + +def _tf_data_yield_value(iterable): + """Yield elements of `iterable` in a deterministic order. + + Args: + iterable: an iterable. + + Yields: + The iterable elements in a deterministic order. + """ + # pylint: disable=protected-access + if isinstance(iterable, _collections_abc.Mapping): + # Iterate through dictionaries in a deterministic order by sorting the + # keys. Notice this means that we ignore the original order of `OrderedDict` + # instances. This is intentional, to avoid potential bugs caused by mixing + # ordered and plain dicts (e.g., flattening a dict but using a + # corresponding `OrderedDict` to pack it back). + for key in _tf_data_sorted(iterable): + yield iterable[key] + # To avoid circular imports. sparse_tensor + # depends on tensorflow/python/util/nest.py transitively, and if we try to + # import sparse_tensor again, it results in a circular import. Instead, here + # we check the class name instead of using `isinstance`. 
+ elif iterable.__class__.__name__ == "SparseTensorValue": + yield iterable + elif _is_attrs(iterable): + for _, attr in _get_attrs_items(iterable): + yield attr + elif isinstance(iterable, CustomNestProtocol): + flat_component = iterable.__tf_flatten__()[1] + assert isinstance(flat_component, tuple) + yield from flat_component + else: + for value in iterable: + yield value + + +def assert_same_structure( + modality, nest1, nest2, check_types=True, expand_composites=False +): + """Asserts that two structures are nested in the same way. + + For Modality.CORE refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. Note the method does not check the types of + atoms inside the structures. + + Examples: + + * These atom vs. atom comparisons will pass: + + >>> tf.nest.assert_same_structure(1.5, tf.Variable(1, tf.uint32)) + >>> tf.nest.assert_same_structure("abc", np.array([1, 2])) + + * These nested structure vs. nested structure comparisons will pass: + + >>> structure1 = (((1, 2), 3), 4, (5, 6)) + >>> structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) + >>> structure3 = [(("a", "b"), "c"), "d", ["e", "f"]] + >>> tf.nest.assert_same_structure(structure1, structure2) + >>> tf.nest.assert_same_structure(structure1, structure3, check_types=False) + + >>> import collections + >>> tf.nest.assert_same_structure( + ... collections.namedtuple("bar", "a b")(1, 2), + ... collections.namedtuple("foo", "a b")(2, 3), + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple("bar", "a b")(1, 2), + ... { "a": 1, "b": 2 }, + ... check_types=False) + + >>> tf.nest.assert_same_structure( + ... { "a": 1, "b": 2, "c": 3 }, + ... { "c": 6, "b": 5, "a": 4 }) + + >>> ragged_tensor1 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4, 1, 5, 9, 2, 6], + ... row_splits=[0, 4, 4, 7, 8, 8]) + >>> ragged_tensor2 = tf.RaggedTensor.from_row_splits( + ... values=[3, 1, 4], + ... 
row_splits=[0, 3]) + >>> tf.nest.assert_same_structure( + ... ragged_tensor1, + ... ragged_tensor2, + ... expand_composites=True) + + * These examples will raise exceptions: + + >>> tf.nest.assert_same_structure([0, 1], np.array([0, 1])) + Traceback (most recent call last): + ... + ValueError: The two structures don't have the same nested structure + + >>> tf.nest.assert_same_structure( + ... collections.namedtuple('bar', 'a b')(1, 2), + ... collections.namedtuple('foo', 'a b')(2, 3)) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + For Modality.DATA, nested structures are treated differently than + Modality.CORE. Please refer to class Modality's documentation above to read up + on these differences. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + nest1: an atom or a nested structure. + nest2: an atom or a nested structure. + check_types: - For Modality.CORE: if `True` (default) types of structures + are checked as well, including the keys of dictionaries. If set to + `False`, for example a list and a tuple of objects will look the same if + they have the same size. Note that namedtuples with identical name and + fields are always considered to have the same shallow structure. Two types + will also be considered the same if they are both list subtypes (which + allows "list" and "_ListWrapper" from trackable dependency tracking to + compare equal). `check_types=True` only checks type of sub-structures. The + types of atoms are not checked. - For Modality.DATA: if `True` (default) + types of sequences should be same as well. For dictionary, "type" of + dictionary is considered to include its keys. In other words, two + dictionaries with different keys are considered to have a different + "type". If set to `False`, two iterables are considered same as long as + they yield the elements that have same structures. 
+ expand_composites: Arg only valid for Modality.CORE. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Raises: + ValueError: If the two structures do not have the same number of atoms or + if the two structures are not nested in the same way. + TypeError: If the two structures differ in the type of sequence in any of + their substructures. Only possible if `check_types` is `True`. + """ + if modality == Modality.CORE: + _tf_core_assert_same_structure(nest1, nest2, check_types, expand_composites) + elif modality == Modality.DATA: + _tf_data_assert_same_structure(nest1, nest2, check_types) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# pylint: disable=missing-function-docstring +def _tf_core_assert_same_structure( + nest1, nest2, check_types=True, expand_composites=False +): + # Convert to bool explicitly as otherwise pybind will not be able# to handle + # type mismatch message correctly. See GitHub issue 42329 for details. + check_types = bool(check_types) + expand_composites = bool(expand_composites) + try: + _pywrap_utils.AssertSameStructure( + nest1, nest2, check_types, expand_composites + ) + except (ValueError, TypeError) as e: + str1 = str(_tf_core_map_structure(lambda _: _DOT, nest1)) + str2 = str(_tf_core_map_structure(lambda _: _DOT, nest2)) + raise type(e)( + "%s\nEntire first structure:\n%s\nEntire second structure:\n%s" + % (str(e), str1, str2) + ) + + +def _tf_data_assert_same_structure(nest1, nest2, check_types=True): + _pywrap_utils.AssertSameStructureForData(nest1, nest2, check_types) + + +def _tf_core_packed_nest_with_indices( + structure, flat, index, is_nested_fn, sequence_fn=None +): + """Helper function for pack_sequence_as. + + Args: + structure: structure to mimic. + flat: Flattened values to output substructure for. + index: Index at which to start reading from flat. 
+ is_nested_fn: Function used to test if a value should be treated as a nested + structure. + sequence_fn: Function used to generate a new strcuture instance. + + Returns: + The tuple (new_index, child), where: + * new_index - the updated index into `flat` having processed `structure`. + * packed - the subset of `flat` corresponding to `structure`, + having started at `index`, and packed into the same nested + format. + + Raises: + ValueError: if `structure` contains more atoms than `flat` + (assuming indexing starts from `index`). + """ + packed = [] + sequence_fn = sequence_fn or sequence_like + for s in _tf_core_yield_value(structure): + if is_nested_fn(s): + new_index, child = _tf_core_packed_nest_with_indices( + s, flat, index, is_nested_fn, sequence_fn + ) + packed.append(sequence_fn(s, child)) + index = new_index + else: + packed.append(flat[index]) + index += 1 + return index, packed + + +def _tf_data_packed_nest_with_indices(structure, flat, index): + """Helper function for pack_nest_as. + + Args: + structure: Substructure (tuple of elements and/or tuples) to mimic + flat: Flattened values to output substructure for. + index: Index at which to start reading from flat. + + Returns: + The tuple (new_index, child), where: + * new_index - the updated index into `flat` having processed `structure`. + * packed - the subset of `flat` corresponding to `structure`, + having started at `index`, and packed into the same nested + format. + + Raises: + ValueError: if `structure` contains more elements than `flat` + (assuming indexing starts from `index`). 
+ """ + packed = [] + for s in _tf_data_yield_value(structure): + if _tf_data_is_nested(s): + new_index, child = _tf_data_packed_nest_with_indices(s, flat, index) + packed.append(sequence_like(s, child)) # pylint: disable=protected-access + index = new_index + else: + packed.append(flat[index]) + index += 1 + return index, packed + + +def flatten(modality, structure, expand_composites=False): + """Flattens a nested structure. + + - For Modality.CORE: refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If the structure is an atom, then returns a single-item list: [structure]. + + This is the inverse of the `nest.pack_sequence_as` method that takes in a + flattened list and re-packs it into the nested structure. + + In the case of dict instances, the sequence consists of the values, sorted by + key to ensure deterministic behavior. This is true also for OrderedDict + instances: their sequence order is ignored, the sorting order of keys is used + instead. The same convention is followed in `nest.pack_sequence_as`. This + correctly repacks dicts and OrderedDicts after they have been flattened, and + also allows flattening an OrderedDict and then repacking it back using a + corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys + cannot be flattened. + + Users must not modify any collections used in nest while this function is + running. + + Examples: + + 1. Python dict (ordered by key): + + >>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" } + >>> tf.nest.flatten(dict) + ['value1', 'value2', 'value3'] + + 2. For a nested python tuple: + + >>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + >>> tf.nest.flatten(tuple) + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + + 3. For a nested dictionary of dictionaries: + + >>> dict = { "key3": {"c": (1.0, 2.0), "a": (3.0)}, + ... "key1": {"m": "val1", "g": "val2"} } + >>> tf.nest.flatten(dict) + ['val2', 'val1', 3.0, 1.0, 2.0] + + 4. 
Numpy array (will not flatten): + + >>> array = np.array([[1, 2], [3, 4]]) + >>> tf.nest.flatten(array) + [array([[1, 2], + [3, 4]])] + + 5. `tf.Tensor` (will not flatten): + + >>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) + >>> tf.nest.flatten(tensor) + [] + + 6. `tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + with `expand_composites=False`, we just return the RaggedTensor as is. + + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=False) + [] + + with `expand_composites=True`, we return the component Tensors that make up + the RaggedTensor representation (the values and row_splits tensors) + + >>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]]) + >>> tf.nest.flatten(tensor, expand_composites=True) + [, + ] + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: an atom or a nested structure. Note, numpy arrays are considered + atoms and are not flattened. + expand_composites: Arg valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Returns: + A Python list, the flattened version of the input. + + Raises: + TypeError: The nest is or contains a dict with non-sortable keys. 
+ """ + if modality == Modality.CORE: + return _tf_core_flatten(structure, expand_composites) + elif modality == Modality.DATA: + return _tf_data_flatten(structure) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_flatten(structure, expand_composites=False): + """See comments for flatten() in tensorflow/python/util/nest.py.""" + if structure is None: + return [None] + expand_composites = bool(expand_composites) + return _pywrap_utils.Flatten(structure, expand_composites) + + +def pack_sequence_as( + modality, structure, flat_sequence, expand_composites, sequence_fn=None +): + """Returns a given flattened sequence packed into a given structure. + + - For Modality.CORE: Refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + If `structure` is an atom, `flat_sequence` must be a single-item list; + in this case the return value is `flat_sequence[0]`. + + If `structure` is or contains a dict instance, the keys will be sorted to + pack the flat sequence in deterministic order. This is true also for + `OrderedDict` instances: their sequence order is ignored, the sorting order of + keys is used instead. The same convention is followed in `flatten`. + This correctly repacks dicts and `OrderedDict`s after they have been + flattened, and also allows flattening an `OrderedDict` and then repacking it + back using a corresponding plain dict, or vice-versa. + Dictionaries with non-sortable keys cannot be flattened. + + Examples: + + 1. Python dict: + + >>> structure = { "key3": "", "key1": "", "key2": "" } + >>> flat_sequence = ["value1", "value2", "value3"] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': 'value3', 'key1': 'value1', 'key2': 'value2'} + + 2. 
For a nested python tuple: + + >>> structure = (('a','b'), ('c','d','e'), 'f') + >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0) + + 3. For a nested dictionary of dictionaries: + + >>> structure = { "key3": {"c": ('alpha', 'beta'), "a": ('gamma')}, + ... "key1": {"e": "val1", "d": "val2"} } + >>> flat_sequence = ['val2', 'val1', 3.0, 1.0, 2.0] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}} + + 4. Numpy array (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [np.array([[1, 2], [3, 4]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [array([[1, 2], + [3, 4]])] + + 5. tf.Tensor (considered a scalar): + + >>> structure = ['a'] + >>> flat_sequence = [tf.constant([[1., 2., 3.], [4., 5., 6.]])] + >>> tf.nest.pack_sequence_as(structure, flat_sequence) + [] + + 6. `tf.RaggedTensor`: This is a composite tensor thats representation consists + of a flattened list of 'values' and a list of 'row_splits' which indicate how + to chop up the flattened list into different rows. For more details on + `tf.RaggedTensor`, please visit + https://www.tensorflow.org/api_docs/python/tf/RaggedTensor. + + With `expand_composites=False`, we treat RaggedTensor as a scalar. + + >>> structure = { "foo": tf.ragged.constant([[1, 2], [3]]), + ... "bar": tf.constant([[5]]) } + >>> flat_sequence = [ "one", "two" ] + >>> tf.nest.pack_sequence_as(structure, flat_sequence, + ... expand_composites=False) + {'foo': 'two', 'bar': 'one'} + + With `expand_composites=True`, we expect that the flattened input contains + the tensors making up the ragged tensor i.e. the values and row_splits + tensors. + + >>> structure = { "foo": tf.ragged.constant([[1., 2.], [3.]]), + ... 
"bar": tf.constant([[5.]]) } + >>> tensors = tf.nest.flatten(structure, expand_composites=True) + >>> print(tensors) + [, + , + ] + >>> verified_tensors = [tf.debugging.check_numerics(t, 'invalid tensor: ') + ... if t.dtype==tf.float32 else t + ... for t in tensors] + >>> tf.nest.pack_sequence_as(structure, verified_tensors, + ... expand_composites=True) + {'foo': , + 'bar': } + + - For Modality.DATA: If `structure` is a scalar, `flat_sequence` must be a + single-element list; + in this case the return value is `flat_sequence[0]`. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + structure: - For Modality.CORE: Nested structure, whose structure is given + by nested lists, tuples, and dicts. Note: numpy arrays and strings are + considered scalars. - For Modality.DATA: tuple or list constructed of + scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are + considered scalars. + flat_sequence: flat sequence to pack. + expand_composites: Arg valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + sequence_fn: Arg valid for Modality.CORE only. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If `flat_sequence` and `structure` have different + atom counts. + TypeError: For Modality.CORE only. `structure` is or contains a dict with + non-sortable keys. 
+ """ + if modality == Modality.CORE: + return _tf_core_pack_sequence_as( + structure, flat_sequence, expand_composites, sequence_fn + ) + elif modality == Modality.DATA: + return _tf_data_pack_sequence_as(structure, flat_sequence) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_pack_sequence_as( + structure, flat_sequence, expand_composites, sequence_fn=None +): + """Implements sequence packing, with the option to alter the structure.""" + is_nested_fn = ( + _is_nested_or_composite if expand_composites else _tf_core_is_nested + ) + sequence_fn = sequence_fn or sequence_like + + def truncate(value, length): + value_str = str(value) + return value_str[:length] + (value_str[length:] and "...") + + if not is_nested_fn(flat_sequence): + raise TypeError( + "Attempted to pack value:\n {}\ninto a structure, but found " + "incompatible type `{}` instead.".format( + truncate(flat_sequence, 100), type(flat_sequence) + ) + ) + + if not is_nested_fn(structure): + if len(flat_sequence) != 1: + raise ValueError( + "The target structure is of type `{}`\n {}\nHowever the input " + "is a sequence ({}) of length {}.\n {}\nnest cannot " + "guarantee that it is safe to map one to the other.".format( + type(structure), + truncate(structure, 100), + type(flat_sequence), + len(flat_sequence), + truncate(flat_sequence, 100), + ) + ) + return flat_sequence[0] + + try: + final_index, packed = _tf_core_packed_nest_with_indices( + structure, flat_sequence, 0, is_nested_fn, sequence_fn + ) + if final_index < len(flat_sequence): + raise IndexError + except IndexError: + flat_structure = _tf_core_flatten( + structure, expand_composites=expand_composites + ) + if len(flat_structure) != len(flat_sequence): + # pylint: disable=raise-missing-from + raise ValueError( + "Could not pack sequence. Structure had %d atoms, but " + "flat_sequence had %d items. Structure: %s, flat_sequence: %s." 
+ % (len(flat_structure), len(flat_sequence), structure, flat_sequence) + ) + return sequence_fn(structure, packed) + + +def _tf_data_pack_sequence_as(structure, flat_sequence): + """Returns a given flattened sequence packed into a nest. + + If `structure` is a scalar, `flat_sequence` must be a single-element list; + in this case the return value is `flat_sequence[0]`. + + Args: + structure: tuple or list constructed of scalars and/or other tuples/lists, + or a scalar. Note: numpy arrays are considered scalars. + flat_sequence: flat sequence to pack. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If nest and structure have different element counts. + """ + if not (_tf_data_is_nested(flat_sequence) or isinstance(flat_sequence, list)): + raise TypeError( + "Argument `flat_sequence` must be a sequence. Got " + f"'{type(flat_sequence).__name__}'." + ) + + if not _tf_data_is_nested(structure): + if len(flat_sequence) != 1: + raise ValueError( + "Argument `structure` is a scalar but " + f"`len(flat_sequence)`={len(flat_sequence)} > 1" + ) + return flat_sequence[0] + + flat_structure = _tf_data_flatten(structure) + if len(flat_structure) != len(flat_sequence): + raise ValueError( + "Could not pack sequence. Argument `structure` had " + f"{len(flat_structure)} elements, but argument `flat_sequence` had " + f"{len(flat_sequence)} elements. Received structure: " + f"{structure}, flat_sequence: {flat_sequence}." + ) + + _, packed = _tf_data_packed_nest_with_indices(structure, flat_sequence, 0) + return sequence_like(structure, packed) # pylint: disable=protected-access + + +def map_structure(modality, func, *structure, **kwargs): + """Creates a new structure by applying `func` to each atom in `structure`. + + - For Modality.CORE: Refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. 
+ + Applies `func(x[0], x[1], ...)` where x[i] enumerates all atoms in + `structure[i]`. All items in `structure` must have the same arity, + and the return value will contain results with the same structure layout. + + Examples: + + * A single Python dict: + + >>> a = {"hello": 24, "world": 76} + >>> tf.nest.map_structure(lambda p: p * 2, a) + {'hello': 48, 'world': 152} + + * Multiple Python dictionaries: + + >>> d1 = {"hello": 24, "world": 76} + >>> d2 = {"hello": 36, "world": 14} + >>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2) + {'hello': 60, 'world': 90} + + * A single Python list: + + >>> a = [24, 76, "ab"] + >>> tf.nest.map_structure(lambda p: p * 2, a) + [48, 152, 'abab'] + + * Scalars: + + >>> tf.nest.map_structure(lambda x, y: x + y, 3, 4) + 7 + + * Empty structures: + + >>> tf.nest.map_structure(lambda x: x + 1, ()) + () + + * Check the types of iterables: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list) + Traceback (most recent call last): + ... + TypeError: The two structures don't have the same nested structure + + * Type check is set to False: + + >>> s1 = (((1, 2), 3), 4, (5, 6)) + >>> s1_list = [[[1, 2], 3], 4, [5, 6]] + >>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False) + (((None, None), None), None, (None, None)) + + - For Modality.DATA: Applies `func(x[0], x[1], ...)` where x[i] is an entry in + `structure[i]`. All structures in `structure` must have the same arity, + and the return value will contain the results in the same structure. + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + func: A callable that accepts as many arguments as there are structures. + *structure: - For Modality.CORE: atom or nested structure. - For + Modality.DATA: scalar, or tuple or list of constructed scalars and/or + other tuples/lists, or scalars. Note: numpy arrays are considered + scalars. 
+ **kwargs: Valid keyword args are: * `check_types`: - For Modality.CORE: If + set to `True` (default) the types of iterables within the structures have + to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` + exception). To allow this set this argument to `False`. Note that + namedtuples with identical name and fields are always considered to have + the same shallow structure. - For Modality.DATA: only valid keyword + argument is `check_types`. If set to `True` (default) the types of + iterables within the structures have to be same (e.g. `map_structure(func, + [1], (1,))` raises a `TypeError` exception). To allow this set this + argument to `False`. * `expand_composites`: Valid for Modality.CORE only. + If set to `True`, then composite tensors such as `tf.sparse.SparseTensor` + and `tf.RaggedTensor` are expanded into their component tensors. If + `False` (the default), then composite tensors are not expanded. + + Returns: + A new structure with the same arity as `structure[0]`, whose atoms + correspond to `func(x[0], x[1], ...)` where `x[i]` is the atom in the + corresponding location in `structure[i]`. If there are different structure + types and `check_types` is `False` the structure types of the first + structure will be used. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + ValueError: If no structure is provided or if the structures do not match + each other by type. + ValueError: If wrong keyword arguments are provided. 
+ """ + if modality == Modality.CORE: + return _tf_core_map_structure(func, *structure, **kwargs) + elif modality == Modality.DATA: + return _tf_data_map_structure(func, *structure, **kwargs) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# pylint: disable=missing-function-docstring +def _tf_core_map_structure(func, *structure, **kwargs): + if not callable(func): + raise TypeError("func must be callable, got: %s" % func) + + if not structure: + raise ValueError("Must provide at least one structure") + + check_types = kwargs.pop("check_types", True) + expand_composites = kwargs.pop("expand_composites", False) + + if kwargs: + raise ValueError( + "Only valid keyword arguments are `check_types` and " + "`expand_composites`, not: `%s`" + % "`, `".join(kwargs.keys()) + ) + + for other in structure[1:]: + _tf_core_assert_same_structure( + structure[0], + other, + check_types=check_types, + expand_composites=expand_composites, + ) + + flat_structure = (_tf_core_flatten(s, expand_composites) for s in structure) + entries = zip(*flat_structure) + + return _tf_core_pack_sequence_as( + structure[0], + [func(*x) for x in entries], + expand_composites=expand_composites, + ) + + +# pylint: disable=missing-function-docstring +def _tf_data_map_structure(func, *structure, **check_types_dict): + if not callable(func): + raise TypeError(f"Argument `func` must be callable, got: {func}") + + if not structure: + raise ValueError("Must provide at least one structure") + + if check_types_dict: + if "check_types" not in check_types_dict or len(check_types_dict) > 1: + raise ValueError( + "Only valid keyword argument for `check_types_dict` is " + f"'check_types'. Got {check_types_dict}." 
+ ) + check_types = check_types_dict["check_types"] + else: + check_types = True + + for other in structure[1:]: + _tf_data_assert_same_structure(structure[0], other, check_types=check_types) + + flat_structure = (_tf_data_flatten(s) for s in structure) + entries = zip(*flat_structure) + + return _tf_data_pack_sequence_as(structure[0], [func(*x) for x in entries]) + + +def yield_flat_up_to(modality, shallow_tree, input_tree, is_nested_fn, path=()): + """Yields (path, value) pairs of input_tree flattened up to shallow_tree. + + - For Modality.CORE: See comments for _tf_core_yield_flat_up_to() below + - For Modality.DATA: See comments for _tf_data_yield_flat_up_to() below + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + shallow_tree: Nested structure. Traverse no further than its leaf nodes. + input_tree: Nested structure. Return the paths and values from this tree. + Must have the same upper structure as shallow_tree. + is_nested_fn: Arg valid for Modality.CORE only. Function used to test if a + value should be treated as a nested structure. + path: Arg valid for Modality.CORE only. Tuple. Optional argument, only used + when recursing. The path from the root of the original shallow_tree, down + to the root of the shallow_tree arg of this recursive call. + + Yields: + Pairs of (path, value), where path the tuple path of a leaf node in + shallow_tree, and value is the value of the corresponding node in + input_tree. + """ + if modality == Modality.CORE: + yield from _tf_core_yield_flat_up_to( + shallow_tree, input_tree, is_nested_fn, path + ) + elif modality == Modality.DATA: + yield from _tf_data_yield_flat_up_to(shallow_tree, input_tree) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +def _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path=()): + """Yields (path, value) pairs of input_tree flattened up to shallow_tree. 
+ + Args: + shallow_tree: Nested structure. Traverse no further than its leaf nodes. + input_tree: Nested structure. Return the paths and values from this tree. + Must have the same upper structure as shallow_tree. + is_nested_fn: Function used to test if a value should be treated as a nested + structure. + path: Tuple. Optional argument, only used when recursing. The path from the + root of the original shallow_tree, down to the root of the shallow_tree + arg of this recursive call. + + Yields: + Pairs of (path, value), where path the tuple path of a leaf node in + shallow_tree, and value is the value of the corresponding node in + input_tree. + """ + if not is_nested_fn(shallow_tree): + yield (path, input_tree) + else: + input_tree = dict(_tf_core_yield_sorted_items(input_tree)) + for ( + shallow_key, + shallow_subtree, + ) in _tf_core_yield_sorted_items(shallow_tree): + subpath = path + (shallow_key,) + input_subtree = input_tree[shallow_key] + for leaf_path, leaf_value in _tf_core_yield_flat_up_to( + shallow_subtree, input_subtree, is_nested_fn, path=subpath + ): + yield (leaf_path, leaf_value) + + +def _tf_data_yield_flat_up_to(shallow_tree, input_tree): + """Yields elements `input_tree` partially flattened up to `shallow_tree`.""" + if _tf_data_is_nested(shallow_tree): + for shallow_branch, input_branch in zip( + _tf_data_yield_value(shallow_tree), _tf_data_yield_value(input_tree) + ): + for input_leaf in _tf_data_yield_flat_up_to(shallow_branch, input_branch): + yield input_leaf + else: + yield input_tree + + +def assert_shallow_structure( + modality, + shallow_tree, + input_tree, + check_types=True, + expand_composites=False, +): + """Asserts that `shallow_tree` is a shallow structure of `input_tree`. + + This function tests if the `input_tree` structure can be created from + the `shallow_tree` structure by replacing its leaf nodes with deeper + tree structures. 
+ + Examples: + + The following code will raise an exception: + ```python + shallow_tree = {"a": "A", "b": "B"} + input_tree = {"a": 1, "c": 2} + assert_shallow_structure(shallow_tree, input_tree) + ``` + + The following code will raise an exception: + ```python + shallow_tree = ["a", "b"] + input_tree = ["c", ["d", "e"], "f"] + assert_shallow_structure(shallow_tree, input_tree) + ``` + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + shallow_tree: an arbitrarily nested structure. + input_tree: an arbitrarily nested structure. + check_types: if `True` (default) the sequence types of `shallow_tree` and + `input_tree` have to be the same. Note that even with check_types==True, + this function will consider two different namedtuple classes with the same + name and _fields attribute to be the same class. + expand_composites: Valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Raises: + TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + TypeError: If the sequence types of `shallow_tree` are different from + `input_tree`. Only raised if `check_types` is `True`. + ValueError: If the sequence lengths of `shallow_tree` are different from + `input_tree`. 
+ """ + if modality == Modality.CORE: + _tf_core_assert_shallow_structure( + shallow_tree, input_tree, check_types, expand_composites + ) + elif modality == Modality.DATA: + _tf_data_assert_shallow_structure(shallow_tree, input_tree, check_types) + else: + raise ValueError( + "Unknown modality used {} for nested structure".format(modality) + ) + + +# pylint: disable=missing-function-docstring +def _tf_core_assert_shallow_structure( + shallow_tree, input_tree, check_types=True, expand_composites=False +): + is_nested_fn = ( + _is_nested_or_composite if expand_composites else _tf_core_is_nested + ) + if is_nested_fn(shallow_tree): + if not is_nested_fn(input_tree): + raise TypeError( + "If shallow structure is a sequence, input must also be a sequence. " + "Input has type: %s." + % type(input_tree) + ) + + if isinstance(shallow_tree, _wrapt.ObjectProxy): + shallow_type = type(shallow_tree.__wrapped__) + else: + shallow_type = type(shallow_tree) + + if check_types and not isinstance(input_tree, shallow_type): + # Duck-typing means that nest should be fine with two different + # namedtuples with identical name and fields. + shallow_is_namedtuple = is_namedtuple(shallow_tree, False) + input_is_namedtuple = is_namedtuple(input_tree, False) + if shallow_is_namedtuple and input_is_namedtuple: + if not same_namedtuples(shallow_tree, input_tree): + raise TypeError( + STRUCTURES_HAVE_MISMATCHING_TYPES.format( + input_type=type(input_tree), shallow_type=type(shallow_tree) + ) + ) + + elif isinstance(shallow_tree, list) and isinstance(input_tree, list): + # List subclasses are considered the same, + # e.g. python list vs. _ListWrapper. + pass + + elif ( + _is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree) + ) and (_is_composite_tensor(input_tree) or _is_type_spec(input_tree)): + pass # Compatibility will be checked below. 
+ + elif not ( + isinstance(shallow_tree, _collections_abc.Mapping) + and isinstance(input_tree, _collections_abc.Mapping) + ): + raise TypeError( + STRUCTURES_HAVE_MISMATCHING_TYPES.format( + input_type=type(input_tree), shallow_type=type(shallow_tree) + ) + ) + + if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree): + if not ( + (_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) + and ( + _is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree) + ) + ): + raise TypeError( + STRUCTURES_HAVE_MISMATCHING_TYPES.format( + input_type=type(input_tree), shallow_type=type(shallow_tree) + ) + ) + # pylint: disable=protected-access + type_spec_1 = ( + shallow_tree + if _is_type_spec(shallow_tree) + else shallow_tree._type_spec + )._without_tensor_names() + type_spec_2 = ( + input_tree if _is_type_spec(input_tree) else input_tree._type_spec + )._without_tensor_names() + # TODO(b/246356867): Replace the most_specific_common_supertype below + # with get_structure. + if hasattr(type_spec_1, "_get_structure") and hasattr( + type_spec_2, "_get_structure" + ): + result = ( + type_spec_1._get_structure() == type_spec_2._get_structure() or None + ) + else: + result = type_spec_1.most_specific_common_supertype([type_spec_2]) + if result is None: + raise ValueError( + "Incompatible CompositeTensor TypeSpecs: %s vs. %s" + % (type_spec_1, type_spec_2) + ) + # pylint: enable=protected-access + + elif _is_type_spec(shallow_tree): + if not _is_type_spec(input_tree): + raise TypeError( + "If shallow structure is a TypeSpec, input must also " + "be a TypeSpec. Input has type: %s." 
+ % type(input_tree) + ) + else: + if len(input_tree) != len(shallow_tree): + raise ValueError( + STRUCTURES_HAVE_MISMATCHING_LENGTHS.format( + input_length=len(input_tree), shallow_length=len(shallow_tree) + ) + ) + elif len(input_tree) < len(shallow_tree): + raise ValueError( + INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format( + input_size=len(input_tree), shallow_size=len(shallow_tree) + ) + ) + + if isinstance(shallow_tree, _collections_abc.Mapping): + absent_keys = set(shallow_tree) - set(input_tree) + if absent_keys: + raise ValueError( + SHALLOW_TREE_HAS_INVALID_KEYS.format(sorted(absent_keys)) + ) + + for shallow_branch, input_branch in zip( + _tf_core_yield_value(shallow_tree), + _tf_core_yield_value(input_tree), + ): + _tf_core_assert_shallow_structure( + shallow_branch, + input_branch, + check_types=check_types, + expand_composites=expand_composites, + ) + + +# pylint: disable=missing-function-docstring +def _tf_data_assert_shallow_structure( + shallow_tree, input_tree, check_types=True +): + if _tf_data_is_nested(shallow_tree): + if not _tf_data_is_nested(input_tree): + raise TypeError( + "If shallow structure is a sequence, input must also be a sequence. " + f"Input has type: '{type(input_tree).__name__}'." + ) + + if check_types and not isinstance(input_tree, type(shallow_tree)): + raise TypeError( + "The two structures don't have the same sequence type. Input " + f"structure has type '{type(input_tree).__name__}', while shallow " + f"structure has type '{type(shallow_tree).__name__}'." + ) + + if len(input_tree) != len(shallow_tree): + raise ValueError( + "The two structures don't have the same sequence length. Input " + f"structure has length {len(input_tree)}, while shallow structure " + f"has length {len(shallow_tree)}." + ) + + if check_types and isinstance(shallow_tree, _collections_abc.Mapping): + if set(input_tree) != set(shallow_tree): + raise ValueError( + "The two structures don't have the same keys. 
Input " + f"structure has keys {list(input_tree)}, while shallow structure " + f"has keys {list(shallow_tree)}." + ) + input_tree = sorted(input_tree.items()) + shallow_tree = sorted(shallow_tree.items()) + + for shallow_branch, input_branch in zip(shallow_tree, input_tree): + _tf_data_assert_shallow_structure( + shallow_branch, input_branch, check_types=check_types + ) + + +def flatten_up_to( + modality, + shallow_tree, + input_tree, + check_types=True, + expand_composites=False, +): + # pylint: disable=g-doc-return-or-yield,g-doc-args + """Flattens `input_tree` up to `shallow_tree`. + + - For Modality.CORE: refer to + [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest) + for the definition of a structure. + + Any further depth in structure in `input_tree` is retained as structures in + the partially flatten output. + + If `shallow_tree` and `input_tree` are atoms, this returns a + single-item list: `[input_tree]`. + + Use Case: + + Sometimes we may wish to partially flatten a structure, retaining some + of the nested structure. We achieve this by specifying a shallow structure, + `shallow_tree`, we wish to flatten up to. + + The input, `input_tree`, can be thought of as having the same structure layout + as `shallow_tree`, but with leaf nodes that are themselves tree structures. 
+ + Examples: + + ```python + input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + shallow_tree = [[True, True], [False, True]] + + flattened_input_tree = flatten_up_to(shallow_tree, input_tree) + flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) + + # Output is: + # [[2, 2], [3, 3], [4, 9], [5, 5]] + # [True, True, False, True] + ``` + + ```python + input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] + shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] + + input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) + input_tree_flattened = flatten(input_tree) + + # Output is: + # [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + # ['a', 1, 'b', 2, 'c', 3, 'd', 4] + ``` + + Edge Cases: + + ```python + flatten_up_to(0, 0) # Output: [0] + flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] + flatten_up_to([0, 1, 2], 0) # Output: TypeError + flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] + + ``` + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + shallow_tree: a possibly pruned structure of input_tree. + input_tree: an atom or a nested structure. Note, numpy arrays are considered + atoms. + check_types: bool. If True, check that each node in shallow_tree has the + same type as the corresponding node in input_tree. + expand_composites: Arg valid for Modality.CORE only. If true, then composite + tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are + expanded into their component tensors. + + Returns: + A Python list, the partially flattened version of `input_tree` according to + the structure of `shallow_tree`. + + Raises: + TypeError: If `shallow_tree` is a nested structure but `input_tree` is not. + TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. 
def _tf_core_flatten_up_to(
    shallow_tree, input_tree, check_types=True, expand_composites=False
):
  """Flattens `input_tree` up to the structure of `shallow_tree` (CORE path).

  Args:
    shallow_tree: a possibly pruned structure of `input_tree`.
    input_tree: an atom or a nested structure to partially flatten.
    check_types: bool. If True, corresponding nodes in the two structures must
      have the same type.
    expand_composites: if true, composite tensors such as
      `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
      component tensors.

  Returns:
    A Python list holding the partially flattened leaves of `input_tree`.
  """
  nested_predicate = (
      _tf_core_is_nested if not expand_composites else _is_nested_or_composite
  )
  _tf_core_assert_shallow_structure(
      shallow_tree,
      input_tree,
      check_types=check_types,
      expand_composites=expand_composites,
  )
  # _tf_core_yield_flat_up_to yields (path, value) pairs; only the values are
  # needed here, so the paths are discarded.
  return [
      value
      for _, value in _tf_core_yield_flat_up_to(
          shallow_tree, input_tree, nested_predicate
      )
  ]


def _tf_data_flatten_up_to(shallow_tree, input_tree):
  """Flattens `input_tree` up to the structure of `shallow_tree` (DATA path)."""
  _tf_data_assert_shallow_structure(shallow_tree, input_tree)
  return [*_tf_data_yield_flat_up_to(shallow_tree, input_tree)]
+ + Examples: + + ```python + shallow_tree = [None, None] + inp_val = [1, 2, 3] + out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val) + + # Output is: [2, 4] + ``` + + ```python + ab_tuple = collections.namedtuple("ab_tuple", "a, b") + op_tuple = collections.namedtuple("op_tuple", "add, mul") + inp_val = ab_tuple(a=2, b=3) + inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) + out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, + inp_val, inp_ops) + + # Output is: ab_tuple(a=6, b=15) + ``` + + ```python + data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] + name_list = ['evens', ['odds', 'primes']] + out = map_structure_up_to( + name_list, + lambda name, sec: "first_{}_{}".format(len(sec), name), + name_list, data_list) + + # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] + ``` + + Args: + modality: enum value of supported modality [Modality.CORE or Modality.DATA] + shallow_tree: a shallow structure, common to all the inputs. + func: callable which will be applied to each input individually. + *inputs: structures that are compatible with shallow_tree. The function + `func` is applied to corresponding structures due to partial flattening of + each input, so the function must support arity of `len(inputs)`. + **kwargs: Arg valid for Modality.CORE only. kwargs to feed to func(). + Special kwarg `check_types` is not passed to func, but instead determines + whether the types of iterables within the structures have to be same (e.g. + `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow + this set this argument to `False`. + + Raises: + TypeError: If `shallow_tree` is a nested structure but `input_tree` is not. + TypeError: If the structure types of `shallow_tree` are different from + `input_tree`. + ValueError: If the structure lengths of `shallow_tree` are different from + `input_tree`. 
def _tf_core_map_structure_with_tuple_paths_up_to(
    shallow_tree, func, *inputs, **kwargs
):
  """See comments for map_structure_with_tuple_paths_up_to() in tensorflow/python/util/nest.py."""
  if not inputs:
    raise ValueError("Cannot map over no sequences")

  check_types = kwargs.pop("check_types", True)
  expand_composites = kwargs.pop("expand_composites", False)
  nested_predicate = (
      _tf_core_is_nested if not expand_composites else _is_nested_or_composite
  )

  # Every input must be at least as deep as the shallow structure before it
  # can be partially flattened against it.
  for tree in inputs:
    _tf_core_assert_shallow_structure(
        shallow_tree,
        tree,
        check_types=check_types,
        expand_composites=expand_composites,
    )

  # Partially flatten each input separately, then invoke `func` once per
  # shallow leaf with that leaf's tuple path and the corresponding values.
  flattened_inputs = [
      _tf_core_flatten_up_to(  # pylint: disable=g-complex-comprehension
          shallow_tree,
          tree,
          check_types,
          expand_composites=expand_composites,
      )
      for tree in inputs
  ]
  leaf_paths = [
      path
      for path, _ in _tf_core_yield_flat_up_to(
          shallow_tree, inputs[0], nested_predicate
      )
  ]
  results = [
      func(*call_args, **kwargs)
      for call_args in zip(leaf_paths, *flattened_inputs)
  ]
  # Repack the per-leaf results into the layout of the shallow structure.
  return _tf_core_pack_sequence_as(
      structure=shallow_tree,
      flat_sequence=results,
      expand_composites=expand_composites,
  )
def get_json_type(obj):
  """Serializes any object to a JSON-serializable structure.

  Args:
    obj: the object to serialize

  Returns:
    JSON-serializable structure representing `obj`.

  Raises:
    TypeError: if `obj` cannot be serialized.
  """
  # NOTE(review): relies on the module-level imports `np` (numpy), `wrapt`,
  # `dtypes`, `tensor_shape` and `collections_abc` from the file header.
  # Serializable Keras-style objects (e.g. optimizer, layer) expose
  # `get_config()`.
  if hasattr(obj, 'get_config'):
    return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}

  # Any numpy value: arrays become (nested) lists, scalars become the
  # equivalent Python scalar.
  if type(obj).__module__ == np.__name__:
    return obj.tolist() if isinstance(obj, np.ndarray) else obj.item()

  # Misc callables (e.g. a loss function) serialize to their name.
  if callable(obj):
    return obj.__name__

  # A plain Python `type` object also serializes to its name.
  if type(obj).__name__ == type.__name__:
    return obj.__name__

  if isinstance(obj, tensor_shape.Dimension):
    return obj.value

  if isinstance(obj, tensor_shape.TensorShape):
    return obj.as_list()

  if isinstance(obj, dtypes.DType):
    return obj.name

  if isinstance(obj, collections_abc.Mapping):
    return dict(obj)

  if obj is Ellipsis:
    return {'class_name': '__ellipsis__'}

  if isinstance(obj, wrapt.ObjectProxy):
    return obj.__wrapped__

  raise TypeError(f'Object {obj} is not JSON-serializable. You may implement '
                  'a `get_config()` method on the class '
                  '(returning a JSON-serializable dictionary) to make it '
                  'serializable.')
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base TFDecorator class and utility functions for working with decorators. + +There are two ways to create decorators that TensorFlow can introspect into. +This is important for documentation generation purposes, so that function +signatures aren't obscured by the (*args, **kwds) signature that decorators +often provide. + +1. Call `tf_decorator.make_decorator` on your wrapper function. If your +decorator is stateless, or can capture all of the variables it needs to work +with through lexical closure, this is the simplest option. Create your wrapper +function as usual, but instead of returning it, return +`tf_decorator.make_decorator(target, your_wrapper)`. This will attach some +decorator introspection metadata onto your wrapper and return it. + +Example: + + def print_hello_before_calling(target): + def wrapper(*args, **kwargs): + print('hello') + return target(*args, **kwargs) + return tf_decorator.make_decorator(target, wrapper) + +2. Derive from TFDecorator. If your decorator needs to be stateful, you can +implement it in terms of a TFDecorator. Store whatever state you need in your +derived class, and implement the `__call__` method to do your work before +calling into your target. You can retrieve the target via +`super(MyDecoratorClass, self).decorated_target`, and call it with whatever +parameters it needs. 
+ +Example: + + class CallCounter(tf_decorator.TFDecorator): + def __init__(self, target): + super(CallCounter, self).__init__('count_calls', target) + self.call_count = 0 + + def __call__(self, *args, **kwargs): + self.call_count += 1 + return super(CallCounter, self).decorated_target(*args, **kwargs) + + def count_calls(target): + return CallCounter(target) +""" +import inspect +from typing import Dict, Any + + +def _make_default_values(fullargspec: inspect.FullArgSpec) -> Dict[str, Any]: + """Returns default values from the function's fullargspec.""" + if fullargspec.defaults is not None: + defaults = { + name: value for name, value in zip( + fullargspec.args[-len(fullargspec.defaults):], fullargspec.defaults) + } + else: + defaults = {} + + if fullargspec.kwonlydefaults is not None: + defaults.update(fullargspec.kwonlydefaults) + + return defaults + + +def fullargspec_to_signature( + fullargspec: inspect.FullArgSpec) -> inspect.Signature: + """Repackages fullargspec information into an equivalent inspect.Signature.""" + defaults = _make_default_values(fullargspec) + parameters = [] + + for arg in fullargspec.args: + parameters.append( + inspect.Parameter( + arg, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + default=defaults.get(arg, inspect.Parameter.empty), + ) + ) + + if fullargspec.varargs is not None: + parameters.append( + inspect.Parameter(fullargspec.varargs, inspect.Parameter.VAR_POSITIONAL) + ) + + for kwarg in fullargspec.kwonlyargs: + parameters.append( + inspect.Parameter( + kwarg, + inspect.Parameter.KEYWORD_ONLY, + default=defaults.get(kwarg, inspect.Parameter.empty), + ) + ) + + if fullargspec.varkw is not None: + parameters.append( + inspect.Parameter(fullargspec.varkw, inspect.Parameter.VAR_KEYWORD) + ) + + return inspect.Signature(parameters) + + +def make_decorator(target, + decorator_func, + decorator_name=None, + decorator_doc='', + decorator_argspec=None): + """Make a decorator from a wrapper and a target. 
+ + Args: + target: The final callable to be wrapped. + decorator_func: The wrapper function. + decorator_name: The name of the decorator. If `None`, the name of the + function calling make_decorator. + decorator_doc: Documentation specific to this application of + `decorator_func` to `target`. + decorator_argspec: Override the signature using FullArgSpec. + + Returns: + The `decorator_func` argument with new metadata attached. + """ + if decorator_name is None: + decorator_name = inspect.currentframe().f_back.f_code.co_name + decorator = TFDecorator(decorator_name, target, decorator_doc, + decorator_argspec) + setattr(decorator_func, '_tf_decorator', decorator) + # Objects that are callables (e.g., a functools.partial object) may not have + # the following attributes. + if hasattr(target, '__name__'): + decorator_func.__name__ = target.__name__ + if hasattr(target, '__qualname__'): + decorator_func.__qualname__ = target.__qualname__ + if hasattr(target, '__module__'): + decorator_func.__module__ = target.__module__ + if hasattr(target, '__dict__'): + # Copy dict entries from target which are not overridden by decorator_func. + for name in target.__dict__: + if name not in decorator_func.__dict__: + decorator_func.__dict__[name] = target.__dict__[name] + if hasattr(target, '__doc__'): + decorator_func.__doc__ = decorator.__doc__ + decorator_func.__wrapped__ = target + # Keeping a second handle to `target` allows callers to detect whether the + # decorator was modified using `rewrap`. + decorator_func.__original_wrapped__ = target + if decorator_argspec: + decorator_func.__signature__ = fullargspec_to_signature( + decorator_argspec) + elif callable(target): + try: + signature = inspect.signature(target) + except (TypeError, ValueError): + # Certain callables such as builtins can not be inspected for signature. 
+ pass + else: + bound_instance = _get_bound_instance(target) + # Present the decorated func as a method as well + if bound_instance and 'self' in signature.parameters: + signature = inspect.Signature(list(signature.parameters.values())[1:]) + decorator_func.__self__ = bound_instance + + decorator_func.__signature__ = signature + + return decorator_func + + +def _get_bound_instance(target): + """Returns the instance any of the targets is attached to.""" + decorators, target = unwrap(target) + for decorator in decorators: + if inspect.ismethod(decorator.decorated_target): + return decorator.decorated_target.__self__ + + +def _has_tf_decorator_attr(obj): + """Checks if object has _tf_decorator attribute. + + This check would work for mocked object as well since it would + check if returned attribute has the right type. + + Args: + obj: Python object. + """ + return (hasattr(obj, '_tf_decorator') and + isinstance(getattr(obj, '_tf_decorator'), TFDecorator)) + + +def rewrap(decorator_func, previous_target, new_target): + """Injects a new target into a function built by make_decorator. + + This function allows replacing a function wrapped by `decorator_func`, + assuming the decorator that wraps the function is written as described below. + + The decorator function must use `.__wrapped__` instead of the + wrapped function that is normally used: + + Example: + + # Instead of this: + def simple_parametrized_wrapper(*args, **kwds): + return wrapped_fn(*args, **kwds) + + tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) + + # Write this: + def simple_parametrized_wrapper(*args, **kwds): + return simple_parametrized_wrapper.__wrapped__(*args, **kwds) + + tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) + + Note that this process modifies decorator_func. + + Args: + decorator_func: Callable returned by `wrap`. + previous_target: Callable that needs to be replaced. + new_target: Callable to replace previous_target with. 
+ + Returns: + The updated decorator. If decorator_func is not a tf_decorator, new_target + is returned. + """ + # Because the process mutates the decorator, we only need to alter the + # innermost function that wraps previous_target. + cur = decorator_func + innermost_decorator = None + target = None + while _has_tf_decorator_attr(cur): + innermost_decorator = cur + target = getattr(cur, '_tf_decorator') + if target.decorated_target is previous_target: + break + cur = target.decorated_target + assert cur is not None + + # If decorator_func is not a decorator, new_target replaces it directly. + if innermost_decorator is None: + # Consistency check. The caller should always pass the result of + # tf_decorator.unwrap as previous_target. If decorator_func is not a + # decorator, that will have returned decorator_func itself. + assert decorator_func is previous_target + return new_target + + target.decorated_target = new_target + + if inspect.ismethod(innermost_decorator): + # Bound methods can't be assigned attributes. Thankfully, they seem to + # be just proxies for their unbound counterpart, and we can modify that. + if hasattr(innermost_decorator, '__func__'): + innermost_decorator.__func__.__wrapped__ = new_target + elif hasattr(innermost_decorator, 'im_func'): + innermost_decorator.im_func.__wrapped__ = new_target + else: + innermost_decorator.__wrapped__ = new_target + else: + innermost_decorator.__wrapped__ = new_target + + return decorator_func + + +def unwrap(maybe_tf_decorator): + """Unwraps an object into a list of TFDecorators and a final target. + + Args: + maybe_tf_decorator: Any callable object. + + Returns: + A tuple whose first element is an list of TFDecorator-derived objects that + were applied to the final callable target, and whose second element is the + final undecorated callable target. If the `maybe_tf_decorator` parameter is + not decorated by any TFDecorators, the first tuple element will be an empty + list. 
The `TFDecorator` list is ordered from outermost to innermost + decorators. + """ + decorators = [] + cur = maybe_tf_decorator + while True: + if isinstance(cur, TFDecorator): + decorators.append(cur) + elif _has_tf_decorator_attr(cur): + decorators.append(getattr(cur, '_tf_decorator')) + else: + break + if not hasattr(decorators[-1], 'decorated_target'): + break + cur = decorators[-1].decorated_target + return decorators, cur + + +class TFDecorator(object): + """Base class for all TensorFlow decorators. + + TFDecorator captures and exposes the wrapped target, and provides details + about the current decorator. + """ + + def __init__(self, + decorator_name, + target, + decorator_doc='', + decorator_argspec=None): + self._decorated_target = target + self._decorator_name = decorator_name + self._decorator_doc = decorator_doc + self._decorator_argspec = decorator_argspec + if hasattr(target, '__name__'): + self.__name__ = target.__name__ + if hasattr(target, '__qualname__'): + self.__qualname__ = target.__qualname__ + if self._decorator_doc: + self.__doc__ = self._decorator_doc + elif hasattr(target, '__doc__') and target.__doc__: + self.__doc__ = target.__doc__ + else: + self.__doc__ = '' + + if decorator_argspec: + self.__signature__ = fullargspec_to_signature(decorator_argspec) + elif callable(target): + try: + self.__signature__ = inspect.signature(target) + except (TypeError, ValueError): + # Certain callables such as builtins can not be inspected for signature. 
+ pass + + def __get__(self, instance, owner): + return self._decorated_target.__get__(instance, owner) + + def __call__(self, *args, **kwargs): + return self._decorated_target(*args, **kwargs) + + @property + def decorated_target(self): + return self._decorated_target + + @decorated_target.setter + def decorated_target(self, decorated_target): + self._decorated_target = decorated_target + + @property + def decorator_name(self): + return self._decorator_name + + @property + def decorator_doc(self): + return self._decorator_doc + + @property + def decorator_argspec(self): + return self._decorator_argspec diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py new file mode 100644 index 0000000000000000000000000000000000000000..58115c7290abc9c5983594a160fab5bac1fc32c1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_decorator_export.py @@ -0,0 +1,26 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Exports functions from tf_decorator.py to avoid cycles.""" + +from tensorflow.python.util import tf_decorator +from tensorflow.python.util import tf_export + + +make_decorator = tf_export.tf_export( + '__internal__.decorator.make_decorator', v1=[] +)(tf_decorator.make_decorator) +unwrap = tf_export.tf_export('__internal__.decorator.unwrap', v1=[])( + tf_decorator.unwrap +) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py new file mode 100644 index 0000000000000000000000000000000000000000..8f45edd6874ab092799bb10bd1d683d2497ba316 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/util/tf_should_use.py @@ -0,0 +1,311 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Decorator that provides a warning if the wrapped object is never used.""" +import copy +import sys +import textwrap +import traceback +import types + +from tensorflow.python.eager import context +from tensorflow.python.framework import ops +from tensorflow.python.platform import tf_logging +from tensorflow.python.util import tf_decorator + + +class _TFShouldUseHelper(object): + """Object stored in TFShouldUse-wrapped objects. 
+ + When it is deleted it will emit a warning or error if its `sate` method + has not been called by time of deletion, and Tensorflow is not executing + eagerly or inside a tf.function (which use autodeps and resolve the + main issues this wrapper warns about). + """ + + def __init__(self, type_, repr_, stack_frame, error_in_function, + warn_in_eager): + self._type = type_ + self._repr = repr_ + self._stack_frame = stack_frame + self._error_in_function = error_in_function + if context.executing_eagerly(): + # If warn_in_eager, sated == False. Otherwise true. + self._sated = not warn_in_eager + elif ops.inside_function(): + if error_in_function: + self._sated = False + ops.add_exit_callback_to_default_func_graph( + lambda: self._check_sated(raise_error=True)) + else: + self._sated = True + else: + # TF1 graph building mode + self._sated = False + + def sate(self): + self._sated = True + self._type = None + self._repr = None + self._stack_frame = None + self._logging_module = None + + def _check_sated(self, raise_error): + """Check if the object has been sated.""" + if self._sated: + return + creation_stack = ''.join( + [line.rstrip() + for line in traceback.format_stack(self._stack_frame, limit=5)]) + if raise_error: + try: + raise RuntimeError( + 'Object was never used (type {}): {}. If you want to mark it as ' + 'used call its "mark_used()" method. 
It was originally created ' + 'here:\n{}'.format(self._type, self._repr, creation_stack)) + finally: + self.sate() + else: + tf_logging.error( + '==================================\n' + 'Object was never used (type {}):\n{}\nIf you want to mark it as ' + 'used call its "mark_used()" method.\nIt was originally created ' + 'here:\n{}\n' + '==================================' + .format(self._type, self._repr, creation_stack)) + + def __del__(self): + self._check_sated(raise_error=False) + + +def _new__init__(self, wrapped_value, tf_should_use_helper): + # pylint: disable=protected-access + self._tf_should_use_helper = tf_should_use_helper + self._tf_should_use_wrapped_value = wrapped_value + + +def _new__setattr__(self, key, value): + if key in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'): + return object.__setattr__(self, key, value) + return setattr( + object.__getattribute__(self, '_tf_should_use_wrapped_value'), + key, value) + + +def _new__getattribute__(self, key): + if key not in ('_tf_should_use_helper', '_tf_should_use_wrapped_value'): + object.__getattribute__(self, '_tf_should_use_helper').sate() + if key in ( + '_tf_should_use_wrapped_value', + '_tf_should_use_helper', + 'mark_used', + '__setattr__', + ): + return object.__getattribute__(self, key) + return getattr( + object.__getattribute__(self, '_tf_should_use_wrapped_value'), key) + + +def _new_mark_used(self, *args, **kwargs): + object.__getattribute__(self, '_tf_should_use_helper').sate() + try: + mu = object.__getattribute__( + object.__getattribute__(self, '_tf_should_use_wrapped_value'), + 'mark_used') + return mu(*args, **kwargs) + except AttributeError: + pass + +OVERLOADABLE_OPERATORS = { + '__add__', + '__radd__', + '__sub__', + '__rsub__', + '__mul__', + '__rmul__', + '__div__', + '__rdiv__', + '__truediv__', + '__rtruediv__', + '__floordiv__', + '__rfloordiv__', + '__mod__', + '__rmod__', + '__lt__', + '__le__', + '__gt__', + '__ge__', + '__ne__', + '__eq__', + '__and__', + 
'__rand__', + '__or__', + '__ror__', + '__xor__', + '__rxor__', + '__getitem__', + '__pow__', + '__rpow__', + '__invert__', + '__neg__', + '__abs__', + '__matmul__', + '__rmatmul__', +} + + +_WRAPPERS = {} + + +class ShouldUseWrapper(object): + pass + + +def _get_wrapper(x, tf_should_use_helper): + """Create a wrapper for object x, whose class subclasses type(x). + + The wrapper will emit a warning if it is deleted without any of its + properties being accessed or methods being called. + + Args: + x: The instance to wrap. + tf_should_use_helper: The object that tracks usage. + + Returns: + An object wrapping `x`, of type `type(x)`. + """ + type_x = type(x) + memoized = _WRAPPERS.get(type_x, None) + if memoized: + return memoized(x, tf_should_use_helper) + + # Make a copy of `object` + tx = copy.deepcopy(ShouldUseWrapper) + # Prefer using __orig_bases__, which preserve generic type arguments. + bases = getattr(tx, '__orig_bases__', tx.__bases__) + + def set_body(ns): + ns.update(tx.__dict__) + return ns + + copy_tx = types.new_class(tx.__name__, bases, exec_body=set_body) + copy_tx.__init__ = _new__init__ + copy_tx.__getattribute__ = _new__getattribute__ + for op in OVERLOADABLE_OPERATORS: + if hasattr(type_x, op): + setattr(copy_tx, op, getattr(type_x, op)) + + copy_tx.mark_used = _new_mark_used + copy_tx.__setattr__ = _new__setattr__ + _WRAPPERS[type_x] = copy_tx + + return copy_tx(x, tf_should_use_helper) + + +def _add_should_use_warning(x, error_in_function=False, warn_in_eager=False): + """Wraps object x so that if it is never used, a warning is logged. + + Args: + x: Python object. + error_in_function: Python bool. If `True`, a `RuntimeError` is raised + if the returned value is never used when created during `tf.function` + tracing. + warn_in_eager: Python bool. If `True` raise warning if in Eager mode as well + as graph mode. 
def should_use_result(fn=None, warn_in_eager=False, error_in_function=False):
  """Function wrapper that ensures the function's output is used.

  If the output is not used, a `logging.error` is logged. If
  `error_in_function` is set, then a `RuntimeError` will be raised at the end
  of function tracing if the output is not used by that point.

  An output is marked as used if any of its attributes are read, modified, or
  updated. Examples when the output is a `Tensor` include:

  - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`)
  - Accessing a property (e.g. getting `t.name` or `t.op`).
  - Calling `t.mark_used()`.

  Note, certain behaviors cannot be tracked - for these the object may not
  be marked as used. Examples include:

  - `t != 0`. In this case, comparison is done on types / ids.
  - `isinstance(t, tf.Tensor)`. Similar to above.

  Args:
    fn: The function to wrap.
    warn_in_eager: Whether to create warnings in Eager as well.
    error_in_function: Whether to raise an error when creating a tf.function.

  Returns:
    The wrapped function.
  """

  def decorated(fn):
    """Decorates the input function."""

    def wrapped(*args, **kwargs):
      # Wrap the result so that discarding it without ever touching it emits
      # a warning (or raises during tf.function tracing).
      result = fn(*args, **kwargs)
      return _add_should_use_warning(
          result,
          warn_in_eager=warn_in_eager,
          error_in_function=error_in_function)

    # Re-indent everything after the docstring's first line so the appended
    # note lines up with the rest of the documentation.
    original_doc = fn.__doc__ or ''
    brief, newline, remainder = original_doc.partition('\n')
    if not newline:
      updated_doc = original_doc
    else:
      updated_doc = '\n'.join([brief, textwrap.dedent(remainder)])

    note = ('\n\nNote: The output of this function should be used. If it is '
            'not, a warning will be logged or an error may be raised. '
            'To mark the output as used, call its .mark_used() method.')
    return tf_decorator.make_decorator(
        target=fn,
        decorator_func=wrapped,
        decorator_name='should_use_result',
        decorator_doc=updated_doc + note)

  # Support use both as a bare decorator (`@should_use_result`) and as a
  # decorator factory (`@should_use_result(...)`).
  if fn is not None:
    return decorated(fn)
  return decorated
"""Utilities for accessing Python generic type annotations (typing.*)."""

import collections.abc
import typing


def is_generic_union(tp):
  """Returns true if `tp` is a parameterized `typing.Union` value."""
  # Only subscripted unions such as `Union[int, str]` qualify; the bare
  # `typing.Union` special form is excluded.
  return getattr(tp, '__origin__', None) is typing.Union and (
      tp is not typing.Union)


def is_generic_tuple(tp):
  """Returns true if `tp` is a parameterized `typing.Tuple` value."""
  if tp in (tuple, typing.Tuple):
    return False  # Bare/unparameterized forms do not count.
  return getattr(tp, '__origin__', None) in (tuple, typing.Tuple)


def is_generic_list(tp):
  """Returns true if `tp` is a parameterized `typing.List` value."""
  if tp in (list, typing.List):
    return False  # Bare/unparameterized forms do not count.
  return getattr(tp, '__origin__', None) in (list, typing.List)


def is_generic_mapping(tp):
  """Returns true if `tp` is a parameterized `typing.Mapping` value."""
  mapping_types = (collections.abc.Mapping, typing.Mapping)
  if tp in mapping_types:
    return False  # Bare/unparameterized forms do not count.
  return getattr(tp, '__origin__', None) in mapping_types


def is_forward_ref(tp):
  """Returns true if `tp` is a typing forward reference."""
  # `typing.ForwardRef` is the public name on modern Pythons; older
  # versions only have the private `typing._ForwardRef`.
  ref_type = getattr(typing, 'ForwardRef', None) or getattr(
      typing, '_ForwardRef', None)
  return ref_type is not None and isinstance(tp, ref_type)


# `typing.get_args` was added in Python 3.8; on older versions fall back to
# reading `__args__` off the parameterized type directly.
get_generic_type_args = getattr(typing, 'get_args',
                                lambda tp: tp.__args__)
"""Utility to manipulate resource variables."""

from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest


def convert_variables_to_tensors(values):
  """Converts `ResourceVariable`s in `values` to `Tensor`s.

  If an object is a `CompositeTensor` and overrides its
  `_convert_variables_to_tensors` method, its `ResourceVariable` components
  will also be converted to `Tensor`s.  Objects other than
  `ResourceVariable`s in `values` are returned unchanged.

  Args:
    values: A nested structure of `ResourceVariable`s, or any other objects.

  Returns:
    A new structure with `ResourceVariable`s in `values` converted to
    `Tensor`s.
  """

  def _to_tensor(value):
    # Variables read out to a Tensor; CompositeTensors recurse into their
    # own components; everything else passes through untouched.
    if _pywrap_utils.IsResourceVariable(value):
      return ops.convert_to_tensor(value)
    if isinstance(value, composite_tensor.CompositeTensor):
      return composite_tensor.convert_variables_to_tensors(value)
    return value

  return nest.map_structure(_to_tensor, values)


def replace_variables_with_atoms(values):
  """Replaces `ResourceVariable`s in `values` with tf.nest atoms.

  This function exists mostly for backward compatibility: historically,
  `ResourceVariable`s were treated as tf.nest atoms, which stopped being
  true once `ResourceVariable` became a `CompositeTensor`.  tf.nest does
  not allow customizing what counts as an atom, so this function manually
  swaps variables for atoms to keep e.g. tf.assert_same_structure working
  on mixed inputs of a `ResourceVariable` and an atom like a `Tensor`.

  The implementation uses 0 as the stand-in atom, but any other tf.nest
  atom would serve.  Note the `TypeSpec` of None is not a tf.nest atom.

  Objects other than `ResourceVariable`s in `values` are returned
  unchanged.

  Note: this function does not look inside `CompositeTensor`s, since
  replacing their variable components would change their `TypeSpec` and
  violate the semantics of `CompositeTensor` and tf.nest; such variables
  are returned as they are.

  Args:
    values: A nested structure of `ResourceVariable`s, or any other objects.

  Returns:
    A new structure with `ResourceVariable`s in `values` converted to atoms.
  """

  def _to_atom(value):
    # 0 is an arbitrary stand-in; tf.nest treats 0 (or tf.constant(0)) as
    # an atom.
    if _pywrap_utils.IsResourceVariable(value):
      return 0
    return value

  return nest.map_structure(_to_atom, values)