diff --git a/.gitattributes b/.gitattributes
index 2f6306070701cb6799b17356b3fb8844e0a5f70b..3cdf7bbf5cd114affa84cb4bf7f29d7649faeb65 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -882,3 +882,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tr
 videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/metrics_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/pfor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/_pywrap_record_io.so filter=lfs diff=lfs merge=lfs -text
+videochat2/lib/python3.10/site-packages/tensorflow/python/lib/core/_pywrap_py_func.so filter=lfs diff=lfs merge=lfs -text
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ab3d065680462bedcbcbe2d931d382e1b34c0cd
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__pycache__/__init__.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..917324d379f29d1438e700bfcb70eb038d56b91f
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/__init__.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/batching.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/batching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21cd0c902cd335a65284ecaec57711a273f47a12
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/batching.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/cardinality.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/cardinality.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f42aa03e1b0b3b2582788f1a74f164032e2349b
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/cardinality.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/compression_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/compression_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..444222a0c4be91df258a89794810c2e556f5606c
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/compression_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/counter.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/counter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27233d2da30b1c203768cf7e930c024a0e844c84
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/counter.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/data_service_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/data_service_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd65ac48d31efabe0b586767b7d9bed1021789a8
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/data_service_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distribute.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distribute.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..612c5ed99c1b30a54400a2921c11a67a9fe40d24
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distribute.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distributed_save_op.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distributed_save_op.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9fb9c146e50984e1bf11d3e75e697f1de43b735
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distributed_save_op.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/enumerate_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/enumerate_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5369550eb58bb9d2f05e2beb2c2b41bb5dcc386d
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/enumerate_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/error_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/error_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3aa2a5a7d16569402bbf8346e487cd75d254c4f
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/error_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/from_list.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/from_list.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4b7857e1feadbce91777ff5a5573da91a98531d
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/from_list.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/get_single_element.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/get_single_element.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2419c0daae4ec84acb10f35249e58c020598ad2
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/get_single_element.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/grouping.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/grouping.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6cbd245ac027949c4414e485ae8842e319d679a
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/grouping.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/interleave_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/interleave_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5dd4771a8ab121be0b62759a8b414f93c82a6eba
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/interleave_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/io.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/io.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a520e6bcbd2e62ff476934b9e2fe1258dc0de06
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/io.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/iterator_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/iterator_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e57a2542f03476dff5a243d3c37091639f8d9bcf
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/iterator_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/lookup_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/lookup_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99e4c7a5ae88d0e17b02cba1b47624ea6af916cc
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/lookup_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/map_defun.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/map_defun.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bba7256889d9da589c75a17becf1731a43f9ab3a
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/map_defun.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/matching_files.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/matching_files.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3de4dde46ba1cecf7396f5420492e107cefb28a5
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/matching_files.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/pad_to_cardinality.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/pad_to_cardinality.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78de9ff9ce9b7a6c22d870cece896076bf78ae41
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/pad_to_cardinality.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/parsing_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/parsing_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1f3206cd33f9a6d2bf14616e4976cdb7a1f7dd0
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/parsing_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/prefetching_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/prefetching_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d9a853d70cae250ea5f0443e761447b6b3abbde
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/prefetching_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_access.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_access.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e9754352acd056621e5bf036fc24fae3f27226e
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_access.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c55234546a896c1be1f1071aca0cee147460ffb
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/readers.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/readers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ff6c0c65354ab998a8dce155aec328ab42e7c41
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/readers.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/resampling.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/resampling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a77afa4e4a450ef18ef3505724fd179f98dd54a2
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/resampling.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/scan_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/scan_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..688f14dfc3381ad62c43a9c2536c4c772874ed50
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/scan_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/shuffle_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/shuffle_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48e0375820844cb48c8e24e686d02877272a24d4
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/shuffle_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/snapshot.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/snapshot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bbb6c053db9c084ee4792fbe0dc7a2ca932fc143
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/snapshot.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/take_while_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/take_while_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fccf38c12a995e09a92e5716f04f9c30a440cc83
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/take_while_ops.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/testing.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a98ff868a292c6d7a2c1b8b25cf3656b363ef1fc
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/testing.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/unique.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/unique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b5c0ddd353e63d30ae0af68da017367c58c40d7
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/unique.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/writers.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/writers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61f93bc74fe67422c84919814a52d19032ab44d1
Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/writers.cpython-310.pyc differ
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/data_service_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/data_service_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..483da9c296285be55756423a769171da5b10c51d
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/data_service_ops.py
@@ -0,0 +1,1176 @@
+# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Python API for executing a tf.data.Dataset using a tf.data service."""
+
+import enum
+import functools
+from typing import Callable
+
+from tensorflow.core.protobuf import data_service_pb2
+from tensorflow.python import tf2
+from tensorflow.python.data.experimental.ops import compression_ops
+from tensorflow.python.data.experimental.service import _pywrap_server_lib
+from tensorflow.python.data.experimental.service import _pywrap_utils
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.data.ops import options as options_lib
+from tensorflow.python.data.ops import structured_function
+from tensorflow.python.data.ops.options import AutoShardPolicy
+from tensorflow.python.data.ops.options import ExternalStatePolicy
+from tensorflow.python.eager import context
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import gen_experimental_dataset_ops
+from tensorflow.python.ops import string_ops
+from tensorflow.python.saved_model import nested_structure_coder
+from tensorflow.python.util.tf_export import tf_export
+
+COMPRESSION_AUTO = "AUTO"
+COMPRESSION_NONE = None
+_PARALLEL_EPOCHS = "parallel_epochs"
+_DISTRIBUTED_EPOCH = "distributed_epoch"
+
+
+@tf_export("data.experimental.service.ShardingPolicy")
+class ShardingPolicy(enum.IntEnum):
+  """Specifies how to shard data among tf.data service workers.
+
+  OFF: No sharding will be performed. Each worker produces the entire dataset
+  without any sharding. With this mode, the best practice is to shuffle the
+  dataset nondeterministically so that workers process the dataset in
+  different orders. If workers are restarted or join the cluster mid-job, they
+  will begin processing the dataset from the beginning.
+
+  DYNAMIC: The input dataset is dynamically split among workers at runtime.
+  Each worker gets the next split when it reads data from the dispatcher. Data
+  is produced non-deterministically in this mode. Dynamic sharding works well
+  with varying-sized tf.data service clusters, e.g., when you need to
+  auto-scale your workers. Dynamic sharding provides at-most-once visitation
+  guarantees. No examples will be repeated, but some may be missed if a
+  tf.data service worker gets restarted while processing a file.
+
+  The following are static sharding policies. The semantics are similar to
+  `tf.data.experimental.AutoShardPolicy`. These policies require:
+  * The tf.data service cluster is configured with a fixed list of workers
+    in DispatcherConfig.
+  * Each client only reads from the local tf.data service worker.
+
+  If a worker is restarted while performing static sharding, the worker will
+  begin processing its shard again from the beginning.
+
+  FILE: Shards by input files (i.e. each worker will get a fixed set of files
+  to process). When this option is selected, make sure that there are at least
+  as many files as workers. If there are fewer input files than workers, a
+  runtime error will be raised.
+
+  DATA: Shards by elements produced by the dataset. Each worker will process
+  the whole dataset and discard the portion that is not for itself. Note that
+  for this mode to correctly partition the dataset elements, the dataset needs
+  to produce elements in a deterministic order.
+
+  FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based
+  sharding on failure.
+
+  HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as
+  a placeholder to replace with `shard(num_workers, worker_index)`.
+  """
+
+  # LINT.IfChange(tf_data_service_sharding_policy)
+  OFF = 0
+  DYNAMIC = 1
+  FILE = 2
+  DATA = 3
+  FILE_OR_DATA = 4
+  HINT = 5
+  # LINT.ThenChange()
+
+  def _to_proto(self) -> data_service_pb2.ProcessingModeDef.ShardingPolicy:
+    """Converts the policy to ProcessingModeDef proto enum."""
+
+    if self == ShardingPolicy.OFF:
+      return data_service_pb2.ProcessingModeDef.OFF
+    if self == ShardingPolicy.DYNAMIC:
+      return data_service_pb2.ProcessingModeDef.DYNAMIC
+    if self == ShardingPolicy.FILE:
+      return data_service_pb2.ProcessingModeDef.FILE
+    if self == ShardingPolicy.DATA:
+      return data_service_pb2.ProcessingModeDef.DATA
+    if self == ShardingPolicy.FILE_OR_DATA:
+      return data_service_pb2.ProcessingModeDef.FILE_OR_DATA
+    if self == ShardingPolicy.HINT:
+      return data_service_pb2.ProcessingModeDef.HINT
+    raise ValueError(f"Unable to convert sharding policy {self!r} to proto.")
+
+
+@tf_export("data.experimental.service.CrossTrainerCache")
+class CrossTrainerCache:
+  """Options related to the tf.data service cross trainer cache.
+
+  This is used to enable cross-trainer cache when distributing a dataset. For
+  example:
+
+  ```
+  dataset = dataset.apply(tf.data.experimental.service.distribute(
+      processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
+      service=FLAGS.tf_data_service_address,
+      job_name="job",
+      cross_trainer_cache=data_service_ops.CrossTrainerCache(
+          trainer_id=trainer_id())))
+  ```
+
+  For more details, refer to
+  https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers.
+  """
+
+  def __init__(self, trainer_id):
+    """Constructs a CrossTrainerCache.
+
+    Args:
+      trainer_id: Each training job has a unique ID. Once a job has consumed
+        data, the data remains in the cache and is re-used by jobs with
+        different `trainer_id`s. Requests with the same `trainer_id` do not
+        re-use data.
+
+    Raises:
+      ValueError: If `trainer_id` is empty.
+    """
+    if not trainer_id:
+      raise ValueError(
+          "tf.data service cross-trainer cache requires a non-empty trainer ID."
+      )
+    self.trainer_id = trainer_id
+
+  def _to_proto(self) -> data_service_pb2.CrossTrainerCacheOptions:
+    return data_service_pb2.CrossTrainerCacheOptions(
+        trainer_id=self.trainer_id)
+
+
+def _get_validated_sharding_policy(processing_mode) -> ShardingPolicy:
+  """Validates `processing_mode` and converts it to ShardingPolicy."""
+
+  if isinstance(processing_mode, ShardingPolicy):
+    return processing_mode
+  if processing_mode == _PARALLEL_EPOCHS:
+    return ShardingPolicy.OFF
+  if processing_mode == _DISTRIBUTED_EPOCH:
+    return ShardingPolicy.DYNAMIC
+
+  raise ValueError("tf.data service processing mode should be a "
+                   "`tf.data.experimental.service.ShardingPolicy`, "
+                   "`\"parallel_epochs\"`, or `\"distributed_epoch\"`. Got "
+                   f"{processing_mode!r}.")
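+
+
+# A minimal sketch of selecting a sharding policy through the public
+# `distribute` transformation (the dispatcher address below is an assumed
+# placeholder, not a prescribed value):
+#
+#   dataset = tf.data.Dataset.list_files("/path/to/data/*.tfrecord")
+#   dataset = dataset.apply(
+#       tf.data.experimental.service.distribute(
+#           processing_mode=tf.data.experimental.service.ShardingPolicy.DYNAMIC,
+#           service="grpc://localhost:5000"))
+#
+# DYNAMIC hands out splits (here, filenames) to workers at runtime; OFF would
+# instead have every worker produce the entire dataset.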
+
+
+def _validate_job_name(job_name) -> None:
+  if job_name is None:
+    return
+  if not isinstance(job_name, str):
+    raise ValueError("`job_name` must be a string, but `job_name` was of type "
+                     f"{type(job_name)}. job_name={job_name}")
+  if not job_name:
+    raise ValueError("`job_name` must not be empty")
+
+
+def _validate_compression(compression) -> None:
+  valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]
+  if compression not in valid_compressions:
+    raise ValueError(f"Invalid `compression` argument: {compression}. "
+                     f"Must be one of {valid_compressions}.")
+
+
+def _get_compression_proto(
+    compression) -> data_service_pb2.DataServiceMetadata.Compression:
+  if compression == COMPRESSION_AUTO:
+    return data_service_pb2.DataServiceMetadata.COMPRESSION_SNAPPY
+  if compression == COMPRESSION_NONE:
+    return data_service_pb2.DataServiceMetadata.COMPRESSION_OFF
+  raise ValueError(f"Invalid `compression` argument: {compression}. "
+                   f"Must be one of {[COMPRESSION_AUTO, COMPRESSION_NONE]}.")
+
+
+def _to_tensor(dataset_id) -> tensor.Tensor:
+  """Converts `dataset_id` to Tensor."""
+
+  if isinstance(dataset_id, tensor.Tensor):
+    return dataset_id
+  if isinstance(dataset_id, str) or isinstance(dataset_id, bytes):
+    return ops.convert_to_tensor(
+        dataset_id, dtype=dtypes.string, name="dataset_id")
+  return ops.convert_to_tensor(
+      dataset_id, dtype=dtypes.int64, name="dataset_id")
+
+
+def _to_string(dataset_id) -> str:
+  """Converts `dataset_id` to string."""
+
+  if isinstance(dataset_id, tensor.Tensor):
+    return (dataset_id if dataset_id.dtype == dtypes.string else
+            string_ops.as_string(dataset_id))
+  return (dataset_id.decode()
+          if isinstance(dataset_id, bytes) else str(dataset_id))
+
+
+class _DataServiceDatasetV2(dataset_ops.DatasetSource):
+  """A `Dataset` that reads elements from the tf.data service."""
+
+  def __init__(self,
+               dataset_id,
+               processing_mode,
+               address,
+               element_spec,
+               protocol,
+               data_transfer_protocol,
+               job_name=None,
+               consumer_index=None,
+               num_consumers=None,
+               max_outstanding_requests=None,
+               task_refresh_interval_hint_ms=None,
+               cross_trainer_cache=None,
+               target_workers="AUTO"):
+    """Constructs a _DataServiceDatasetV2.
+
+    Args:
+      dataset_id: The dataset id for the dataset to read from.
+      processing_mode: A `tf.data.experimental.service.ShardingPolicy`
+        specifying how to shard the dataset among tf.data workers. See
+        `tf.data.experimental.service.ShardingPolicy` for details. For
+        backwards compatibility, `processing_mode` may also be set to the
+        strings `"parallel_epochs"` or `"distributed_epoch"`, which are
+        respectively equivalent to `ShardingPolicy.OFF` and
+        `ShardingPolicy.DYNAMIC`.
+      address: The tf.data service address, e.g. "localhost:5000".
+      element_spec: The dataset element spec for the dataset to read from.
+      protocol: The protocol to use for communicating with the tf.data
+        service, e.g. "grpc".
+      data_transfer_protocol: (Optional.) The protocol to use for transferring
+        data with the tf.data service. By default, data is transferred using
+        gRPC.
+      job_name: (Optional.) The name of the job. If provided, it must be a
+        non-empty string or Tensor. This argument makes it possible for
+        multiple datasets to share the same job. The default behavior is that
+        the dataset creates anonymous, exclusively owned jobs.
+      consumer_index: (Optional.) The index of the consumer in the range from
+        `0` to `num_consumers`. Must be specified alongside `num_consumers`.
+        When specified, consumers will read from the job in a strict
+        round-robin order, instead of the default first-come-first-served
+        order.
+      num_consumers: (Optional.) The number of consumers which will consume
+        from the job. Must be specified alongside `consumer_index`. When
+        specified, consumers will read from the job in a strict round-robin
+        order, instead of the default first-come-first-served order. When
+        `num_consumers` is specified, the dataset must have infinite
+        cardinality to prevent a producer from running out of data early and
+        causing consumers to go out of sync.
+      max_outstanding_requests: (Optional.) A limit on how many elements may
+        be requested at the same time. You can use this option to control the
+        amount of memory used, since `distribute` won't use more than
+        `element_size` * `max_outstanding_requests` of memory.
+      task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
+        the dispatcher for task changes.
+      cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
+        provided, dataset iteration will be shared across concurrently running
+        trainers. See
+        https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
+        for details.
+      target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
+        tf.data runtime decides which workers to read from. If `"ANY"`, reads
+        from any tf.data service workers. If `"LOCAL"`, only reads from local
+        in-process tf.data service workers. `"AUTO"` works well for most
+        cases, while users can specify other targets. For example, `"LOCAL"`
+        helps avoid RPCs and data copy if every TF worker colocates with a
+        tf.data service worker. Consumers of a shared job must use the same
+        `target_workers`. Defaults to `"AUTO"`.
+    """
+    if (consumer_index is None) != (num_consumers is None):
+      raise ValueError(
+          "Must either set both `consumer_index` and `num_consumers`, "
+          "or neither. "
+          f"consumer_index={consumer_index}, num_consumers={num_consumers}")
+    if num_consumers is not None and job_name is None:
+      raise ValueError("`job_name` must be set when setting `num_consumers`. "
+                       f"num_consumers was set to {num_consumers}.")
+
+    processing_mode_def = data_service_pb2.ProcessingModeDef(
+        sharding_policy=_get_validated_sharding_policy(
+            processing_mode)._to_proto())
+    if job_name is None:
+      job_name = ""
+    if max_outstanding_requests is None:
+      max_outstanding_requests = dataset_ops.AUTOTUNE
+    if task_refresh_interval_hint_ms is None:
+      task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE
+
+    self._dataset_id = _to_tensor(dataset_id)
+    self._processing_mode = ops.convert_to_tensor(
+        processing_mode_def.SerializeToString(),
+        dtype=dtypes.string,
+        name="processing_mode")
+    self._address = ops.convert_to_tensor(
+        address, dtype=dtypes.string, name="address")
+    self._protocol = ops.convert_to_tensor(
+        protocol, dtype=dtypes.string, name="protocol")
+    self._job_name = ops.convert_to_tensor(
+        job_name, dtype=dtypes.string, name="job_name")
+    self._consumer_index = ops.convert_to_tensor(
+        -1 if consumer_index is None else consumer_index,
+        dtype=dtypes.int64,
+        name="consumer_index")
+    self._num_consumers = ops.convert_to_tensor(
+        -1 if num_consumers is None else num_consumers,
+        dtype=dtypes.int64,
+        name="num_consumers")
+    self._max_outstanding_requests = ops.convert_to_tensor(
+        max_outstanding_requests,
+        dtype=dtypes.int64,
+        name="max_outstanding_requests")
+    self._element_spec = element_spec
+    uncompress_func = structured_function.StructuredFunctionWrapper(
+        lambda x: compression_ops.uncompress(x, output_spec=element_spec),
+        transformation_name="DataServiceDataset.uncompress()",
+        input_structure=tensor.TensorSpec(shape=(), dtype=dtypes.variant))
+    cross_trainer_cache_options = (
+        cross_trainer_cache._to_proto().SerializeToString()
+        if cross_trainer_cache else None)
+
+    compat_kwargs = {}
+    if data_transfer_protocol is not None:
+      compat_kwargs["data_transfer_protocol"] = data_transfer_protocol
+
+    # If `uncompress` is `True`, the dataset will query the servers to find
+    # out the actual compression used. It is always set to `True` the first
+    # time the graph is built, and set to `False` when serializing, so we will
+    # uncompress at most once.
+    uncompress = True
+    variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v4(
+        dataset_id=self._dataset_id,
+        processing_mode=self._processing_mode,
+        address=self._address,
+        protocol=self._protocol,
+        job_name=self._job_name,
+        consumer_index=self._consumer_index,
+        num_consumers=self._num_consumers,
+        max_outstanding_requests=self._max_outstanding_requests,
+        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
+        iteration_counter=(
+            gen_experimental_dataset_ops.dummy_iteration_counter()),
+        target_workers=target_workers,
+        uncompress=uncompress,
+        uncompress_fn=uncompress_func.function,
+        cross_trainer_cache_options=cross_trainer_cache_options,
+        **compat_kwargs,
+        **self._flat_structure)
+    super(_DataServiceDatasetV2, self).__init__(variant_tensor)
+
+  @property
+  def element_spec(self):
+    return self._element_spec
+
+
+class _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):
+  """A `Dataset` that executes its input through the tf.data service."""
+
+  @functools.wraps(_DataServiceDatasetV2.__init__)
+  def __init__(self, dataset_id, processing_mode, address, element_spec,
+               protocol, data_transfer_protocol, job_name, consumer_index,
+               num_consumers, max_outstanding_requests,
+               task_refresh_interval_hint_ms, cross_trainer_cache,
+               target_workers):
+
+    self._wrapped = _DataServiceDatasetV2(
+        dataset_id=dataset_id,
+        processing_mode=processing_mode,
+        address=address,
+        element_spec=element_spec,
+        protocol=protocol,
+        data_transfer_protocol=data_transfer_protocol,
+        job_name=job_name,
+        consumer_index=consumer_index,
+        num_consumers=num_consumers,
+        max_outstanding_requests=max_outstanding_requests,
+        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
+        cross_trainer_cache=cross_trainer_cache,
+        target_workers=target_workers)
+    super(_DataServiceDatasetV1, self).__init__(self._wrapped)
+
+
+if tf2.enabled():
+  _DataServiceDataset = _DataServiceDatasetV2
+else:
+  _DataServiceDataset = _DataServiceDatasetV1
+
+
+def _parse_service(service) -> tuple[str, str]:
+  """Converts a tf.data service string into a (protocol, address) tuple.
+
+  Args:
+    service: A string in the format "protocol://address" or just "address". If
+      the string is only an address, the default protocol will be used.
+
+  Returns:
+    The (protocol, address) tuple.
+  """
+  if not isinstance(service, str):
+    raise ValueError("`service` must be a string, but `service` was of type "
+                     f"{type(service)}. service={service}")
+  if not service:
+    raise ValueError("`service` must not be empty")
+  parts = service.split("://")
+  if len(parts) == 2:
+    protocol, address = parts
+  elif len(parts) == 1:
+    address = parts[0]
+    protocol = _pywrap_utils.TF_DATA_DefaultProtocol()
+  else:
+    raise ValueError("Malformed `service` string has multiple '://': "
+                     f"{service}.")
+  # TODO(aaudibert): Consider validating reachability of address here.
+  return (protocol, address)
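+
+
+# For reference, `_parse_service` maps strings to (protocol, address) tuples;
+# e.g. (addresses illustrative, default protocol is commonly "grpc"):
+#
+#   _parse_service("grpc://localhost:5000")  # ("grpc", "localhost:5000")
+#   _parse_service("localhost:5000")         # (default protocol, "localhost:5000")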
+
+
+def _distribute(
+    processing_mode,
+    service,
+    job_name=None,
+    consumer_index=None,
+    num_consumers=None,
+    max_outstanding_requests=None,
+    task_refresh_interval_hint_ms=None,
+    data_transfer_protocol=None,
+    compression="AUTO",
+    cross_trainer_cache=None,
+    target_workers="AUTO",
+) -> Callable[[dataset_ops.Dataset], dataset_ops.Dataset]:
+  """A transformation that moves dataset processing to the tf.data service.
+
+  This transformation is similar to `distribute`, but supports additional
+  parameters which we do not yet want to add to the public Python API.
+
+  Args:
+    processing_mode: A `tf.data.experimental.service.ShardingPolicy`
+      specifying how to shard the dataset among tf.data workers. See
+      `tf.data.experimental.service.ShardingPolicy` for details. For backwards
+      compatibility, `processing_mode` may also be set to the strings
+      `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
+      equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
+    service: A string or a tuple indicating how to connect to the tf.data
+      service. If it's a string, it should be in the format
+      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
+      address and `<protocol>` can optionally be used to override the default
+      protocol to use. If it's a tuple, it should be (protocol, address).
+    job_name: (Optional.) The name of the job. If provided, it must be a
+      non-empty string. This argument makes it possible for multiple datasets
+      to share the same job. The default behavior is that the dataset creates
+      anonymous, exclusively owned jobs.
+    consumer_index: (Optional.) The index of the consumer in the range from
+      `0` to `num_consumers`. Must be specified alongside `num_consumers`.
+      When specified, consumers will read from the job in a strict round-robin
+      order, instead of the default first-come-first-served order.
+    num_consumers: (Optional.) The number of consumers which will consume from
+      the job. Must be specified alongside `consumer_index`. When specified,
+      consumers will read from the job in a strict round-robin order, instead
+      of the default first-come-first-served order. When `num_consumers` is
+      specified, the dataset must have infinite cardinality to prevent a
+      producer from running out of data early and causing consumers to go out
+      of sync.
+    max_outstanding_requests: (Optional.) A limit on how many elements may be
+      requested at the same time. You can use this option to control the
+      amount of memory used, since `distribute` won't use more than
+      `element_size` * `max_outstanding_requests` of memory.
+    task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
+      the dispatcher for task changes.
+    data_transfer_protocol: (Optional.) The protocol to use for transferring
+      data with the tf.data service. By default, data is transferred using
+      gRPC.
+    compression: How to compress the dataset's elements before transferring
+      them over the network. "AUTO" leaves the decision of how to compress up
+      to the tf.data service runtime. `None` indicates not to compress.
+    cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
+      provided, dataset iteration will be shared across concurrently running
+      trainers. See
+      https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
+      for details.
+    target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
+      tf.data runtime decides which workers to read from. If `"ANY"`, reads
+      from any tf.data service workers. If `"LOCAL"`, only reads from local
+      in-process tf.data service workers. `"AUTO"` works well for most cases,
+      while users can specify other targets. For example, `"LOCAL"` helps
+      avoid RPCs and data copy if every TF worker colocates with a tf.data
+      service worker. Consumers of a shared job must use the same
+      `target_workers`. Defaults to `"AUTO"`.
+
+  Returns:
+    Dataset: A `Dataset` of the elements produced by the data service.
+  """
+  processing_mode = _get_validated_sharding_policy(processing_mode)
+  _validate_compression(compression)
+
+  def _apply_fn(dataset) -> dataset_ops.Dataset:  # pylint: disable=missing-docstring
+    dataset_id = _register_dataset(service, dataset, compression=compression)
+    return _from_dataset_id(
+        processing_mode,
+        service,
+        dataset_id,
+        dataset.element_spec,
+        job_name=job_name,
+        consumer_index=consumer_index,
+        num_consumers=num_consumers,
+        max_outstanding_requests=max_outstanding_requests,
+        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
+        data_transfer_protocol=data_transfer_protocol,
+        cross_trainer_cache=cross_trainer_cache,
+        target_workers=target_workers)
+
+  return _apply_fn
+
+
+@tf_export("data.experimental.service.distribute")
+def distribute(
+    processing_mode,
+    service,
+    job_name=None,
+    consumer_index=None,
+    num_consumers=None,
+    max_outstanding_requests=None,
+    data_transfer_protocol=None,
+    compression="AUTO",
+    cross_trainer_cache=None,
+    target_workers="AUTO",
+) -> Callable[[dataset_ops.Dataset], dataset_ops.Dataset]:
+  """A transformation that moves dataset processing to the tf.data service.
+
+  When you iterate over a dataset containing the `distribute` transformation,
+  the tf.data service creates a "job" which produces data for the dataset
+  iteration.
+
+  The tf.data service uses a cluster of workers to prepare data for training
+  your model. The `processing_mode` argument to
+  `tf.data.experimental.service.distribute` describes how to leverage multiple
+  workers to process the input dataset. Currently, there are two processing
+  modes to choose from: "distributed_epoch" and "parallel_epochs".
+
+  "distributed_epoch" means that the dataset will be split across all tf.data
+  service workers. The dispatcher produces "splits" for the dataset and sends
+  them to workers for further processing. For example, if a dataset begins
+  with a list of filenames, the dispatcher will iterate through the filenames
+  and send the filenames to tf.data workers, which will perform the rest of
+  the dataset transformations on those files. "distributed_epoch" is useful
+  when your model needs to see each element of the dataset exactly once, or if
+  it needs to see the data in a generally-sequential order.
+  "distributed_epoch" only works for datasets with splittable sources, such as
+  `Dataset.from_tensor_slices`, `Dataset.list_files`, or `Dataset.range`.
+
+  "parallel_epochs" means that the entire input dataset will be processed
+  independently by each of the tf.data service workers. For this reason, it is
+  important to shuffle data (e.g. filenames) non-deterministically, so that
+  each worker will process the elements of the dataset in a different order.
+  "parallel_epochs" can be used to distribute datasets that aren't splittable.
+
+  With two workers, "parallel_epochs" will produce every element of the
+  dataset twice:
+
+  >>> dispatcher = tf.data.experimental.service.DispatchServer()
+  >>> dispatcher_address = dispatcher.target.split("://")[1]
+  >>> # Start two workers
+  >>> workers = [
+  ...     tf.data.experimental.service.WorkerServer(
+  ...         tf.data.experimental.service.WorkerConfig(
+  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
+  ... ]
+  >>> dataset = tf.data.Dataset.range(10)
+  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
+  ...     processing_mode="parallel_epochs", service=dispatcher.target))
+  >>> print(sorted(list(dataset.as_numpy_iterator())))
+  [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]
+
+  "distributed_epoch", on the other hand, will still produce each element
+  once:
+
+  >>> dispatcher = tf.data.experimental.service.DispatchServer()
+  >>> dispatcher_address = dispatcher.target.split("://")[1]
+  >>> workers = [
+  ...     tf.data.experimental.service.WorkerServer(
+  ...         tf.data.experimental.service.WorkerConfig(
+  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
+  ... ]
+  >>> dataset = tf.data.Dataset.range(10)
+  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
+  ...     processing_mode="distributed_epoch", service=dispatcher.target))
+  >>> print(sorted(list(dataset.as_numpy_iterator())))
+  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+
+  When using `apply(tf.data.experimental.service.distribute(...))`, the
+  dataset before the `apply` transformation executes within the tf.data
+  service, while the operations after `apply` happen within the local process.
+
+  >>> dispatcher = tf.data.experimental.service.DispatchServer()
+  >>> dispatcher_address = dispatcher.target.split("://")[1]
+  >>> workers = [
+  ...     tf.data.experimental.service.WorkerServer(
+  ...         tf.data.experimental.service.WorkerConfig(
+  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
+  ... ]
+  >>> dataset = tf.data.Dataset.range(5)
+  >>> dataset = dataset.map(lambda x: x*x)
+  >>> dataset = dataset.apply(
+  ...     tf.data.experimental.service.distribute("parallel_epochs",
+  ...                                             dispatcher.target))
+  >>> dataset = dataset.map(lambda x: x+1)
+  >>> print(sorted(list(dataset.as_numpy_iterator())))
+  [1, 1, 2, 2, 5, 5, 10, 10, 17, 17]
+
+  In the above example, the dataset operations (before applying the
+  `distribute` function on the elements) will be executed on the tf.data
+  workers, and the elements are provided over RPC. The remaining
+  transformations (after the call to `distribute`) will be executed locally.
+  The dispatcher and the workers will bind to unused free ports (which are
+  chosen at random), in order to communicate with each other. However, to
+  bind them to specific ports, the `port` parameter can be passed.
+
+  The `job_name` argument allows jobs to be shared across multiple datasets.
+  Instead of each dataset creating its own job, all datasets with the same
+  `job_name` will consume from the same job. A new job will be created for
+  each iteration of the dataset (with each repetition of `Dataset.repeat`
+  counting as a new iteration). Suppose the `DispatchServer` is serving on
+  `localhost:5000` and two training workers (in either a single client or
+  multi-client setup) iterate over the below dataset, and there is a single
+  tf.data worker:
+
+  ```
+  range5_dataset = tf.data.Dataset.range(5)
+  dataset = range5_dataset.apply(tf.data.experimental.service.distribute(
+      "parallel_epochs", "localhost:5000", job_name="my_job_name"))
+  for iteration in range(3):
+    print(list(dataset))
+  ```
+
+  The elements of each job will be split between the two processes, with
+  elements being consumed by the processes on a first-come first-served basis.
+  One possible result is that process 1 prints
+
+  ```
+  [0, 2, 4]
+  [0, 1, 3]
+  [1]
+  ```
+
+  and process 2 prints
+
+  ```
+  [1, 3]
+  [2, 4]
+  [0, 2, 3, 4]
+  ```
+
+  Job names must not be re-used across different training jobs within the
+  lifetime of the tf.data service. In general, the tf.data service is expected
+  to live for the duration of a single training job. To use the tf.data
+  service with multiple training jobs, make sure to use different job names to
+  avoid conflicts. For example, suppose a training job calls `distribute` with
+  `job_name="job"` and reads until end of input. If another independent job
+  connects to the same tf.data service and tries to read from
+  `job_name="job"`, it will immediately receive end of input, without getting
+  any data.
+
+  **Coordinated data read**
+
+  By default, when multiple consumers read from the same job, they receive
+  data on a first-come first-served basis. In some use cases, it is
+  advantageous to coordinate the consumers. At each step, consumers read data
+  from the same worker.
+
+  For example, the tf.data service can be used to coordinate example sizes
+  across a cluster during synchronous training, so that during each step all
+  replicas train on similar-sized elements. To achieve this, define a dataset
+  which generates rounds of `num_consumers` consecutive similar-sized batches,
+  then enable coordinated reads by setting `consumer_index` and
+  `num_consumers`.
+
+  NOTE: To keep consumers in sync, round robin data consumption requires that
+  the dataset have infinite cardinality. You can get this by adding
+  `.repeat()` at the end of the dataset definition.
+
+  **Keras and Distribution Strategies**
+
+  The dataset produced by the `distribute` transformation can be passed to
+  Keras' `Model.fit` or Distribution Strategy's
+  `tf.distribute.Strategy.experimental_distribute_dataset` like any other
+  `tf.data.Dataset`. We recommend setting a `job_name` on the call to
+  `distribute` so that if there are multiple workers, they read data from the
+  same job. Note that the autosharding normally performed by
+  `experimental_distribute_dataset` will be disabled when setting a
+  `job_name`, since sharing the job already results in splitting data across
+  the workers. When using a shared job, data will be dynamically balanced
+  across workers, so that they reach end of input about the same time. This
+  results in better worker utilization than with autosharding, where each
+  worker processes an independent set of files, and some workers may run out
+  of data earlier than others.
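+
+  For illustration, a combined setup might look like the following sketch
+  (the dispatcher address, `job_name`, and `strategy` here are assumed
+  placeholders, not prescribed values):
+
+  ```
+  strategy = tf.distribute.MirroredStrategy()
+  dataset = dataset.apply(tf.data.experimental.service.distribute(
+      processing_mode="distributed_epoch",
+      service="grpc://dispatcher:5000",
+      job_name="shared_training_job"))
+  dist_dataset = strategy.experimental_distribute_dataset(dataset)
+  ```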
+
+  Args:
+    processing_mode: A `tf.data.experimental.service.ShardingPolicy`
+      specifying how to shard the dataset among tf.data workers. See
+      `tf.data.experimental.service.ShardingPolicy` for details. For backwards
+      compatibility, `processing_mode` may also be set to the strings
+      `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
+      equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
+    service: A string or a tuple indicating how to connect to the tf.data
+      service. If it's a string, it should be in the format
+      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
+      address and `<protocol>` can optionally be used to override the default
+      protocol to use. If it's a tuple, it should be (protocol, address).
+    job_name: (Optional.) The name of the job. If provided, it must be a
+      non-empty string. This argument makes it possible for multiple datasets
+      to share the same job. The default behavior is that the dataset creates
+      anonymous, exclusively owned jobs.
+    consumer_index: (Optional.) The index of the consumer in the range from
+      `0` to `num_consumers`. Must be specified alongside `num_consumers`.
+      When specified, consumers will read from the job in a strict round-robin
+      order, instead of the default first-come-first-served order.
+    num_consumers: (Optional.) The number of consumers which will consume from
+      the job. Must be specified alongside `consumer_index`. When specified,
+      consumers will read from the job in a strict round-robin order, instead
+      of the default first-come-first-served order. When `num_consumers` is
+      specified, the dataset must have infinite cardinality to prevent a
+      producer from running out of data early and causing consumers to go out
+      of sync.
+    max_outstanding_requests: (Optional.) A limit on how many elements may be
+      requested at the same time. You can use this option to control the
+      amount of memory used, since `distribute` won't use more than
+      `element_size` * `max_outstanding_requests` of memory.
+    data_transfer_protocol: (Optional.) The protocol to use for transferring
+      data with the tf.data service. By default, data is transferred using
+      gRPC.
+    compression: How to compress the dataset's elements before transferring
+      them over the network. "AUTO" leaves the decision of how to compress up
+      to the tf.data service runtime. `None` indicates not to compress.
+    cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
+      provided, dataset iteration will be shared across concurrently running
+      trainers. See
+      https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
+      for details.
+    target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
+      tf.data runtime decides which workers to read from. If `"ANY"`, reads
+      from any tf.data service workers. If `"LOCAL"`, only reads from local
+      in-process tf.data service workers. `"AUTO"` works well for most cases,
+      while users can specify other targets. For example, `"LOCAL"` helps
+      avoid RPCs and data copy if every TF worker colocates with a tf.data
+      service worker. Consumers of a shared job must use the same
+      `target_workers`. Defaults to `"AUTO"`.
+
+  Returns:
+    Dataset: A `Dataset` of the elements produced by the data service.
+  """
+  _validate_job_name(job_name)
+  return _distribute(
+      processing_mode=processing_mode,
+      service=service,
+      job_name=job_name,
+      consumer_index=consumer_index,
+      num_consumers=num_consumers,
+      max_outstanding_requests=max_outstanding_requests,
+      data_transfer_protocol=data_transfer_protocol,
+      compression=compression,
+      cross_trainer_cache=cross_trainer_cache,
+      target_workers=target_workers)
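+
+
+# A sketch of the coordinated-read mode described in `distribute` above: each
+# of two trainers passes its own `consumer_index` plus the shared
+# `num_consumers`, and `.repeat()` provides the required infinite cardinality
+# (the service address and job name are assumed placeholders):
+#
+#   dataset = dataset.repeat().apply(
+#       tf.data.experimental.service.distribute(
+#           processing_mode="parallel_epochs",
+#           service="grpc://dispatcher:5000",
+#           job_name="coordinated_job",
+#           consumer_index=trainer_index,  # 0 or 1, unique per trainer
+#           num_consumers=2))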
+
+
+def _register_dataset(
+    service, dataset, compression, dataset_id=None) -> tensor.Tensor:
+  """Registers a dataset with the tf.data service.
+
+  This transformation is similar to `register_dataset`, but supports
+  additional parameters which we do not yet want to add to the public Python
+  API.
+
+  Args:
+    service: A string or a tuple indicating how to connect to the tf.data
+      service. If it's a string, it should be in the format
+      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
+      address and `<protocol>` can optionally be used to override the default
+      protocol to use. If it's a tuple, it should be (protocol, address).
+    dataset: A `tf.data.Dataset` to register with the tf.data service.
+    compression: How to compress the dataset's elements before transferring
+      them over the network. "AUTO" leaves the decision of how to compress up
+      to the tf.data service runtime. `None` indicates not to compress.
+    dataset_id: (Optional.) By default, tf.data service generates a unique
+      (string) ID for each registered dataset. If a `dataset_id` is provided,
+      it will use the specified ID. If a dataset with a matching ID already
+      exists, no new dataset is registered. This is useful if multiple
+      training jobs want to (re)use the same dataset for training. In this
+      case, they can register the dataset with the same dataset ID.
+
+  Returns:
+    A scalar string tensor representing the dataset ID.
+  """
+  _validate_compression(compression)
+
+  if isinstance(service, tuple):
+    protocol, address = service
+  else:
+    protocol, address = _parse_service(service)
+  external_state_policy = dataset.options().experimental_external_state_policy
+  if external_state_policy is None:
+    external_state_policy = ExternalStatePolicy.WARN
+
+  encoded_spec = None
+  if context.executing_eagerly():
+    encoded_spec = nested_structure_coder.encode_structure(
+        dataset.element_spec).SerializeToString()
+
+  if compression == COMPRESSION_AUTO:
+    dataset = dataset.map(
+        lambda *x: compression_ops.compress(x),
+        num_parallel_calls=dataset_ops.AUTOTUNE)
+  dataset = dataset._apply_debug_options()  # pylint: disable=protected-access
+
+  metadata = data_service_pb2.DataServiceMetadata(
+      element_spec=encoded_spec,
+      compression=_get_compression_proto(compression))
+
+  return gen_experimental_dataset_ops.register_dataset_v2(
+      dataset._variant_tensor,  # pylint: disable=protected-access
+      address=address,
+      protocol=protocol,
+      external_state_policy=external_state_policy.value,
+      requested_dataset_id=dataset_id,
+      metadata=metadata.SerializeToString())
+
+
+@tf_export("data.experimental.service.register_dataset")
+def register_dataset(
+    service, dataset, compression="AUTO", dataset_id=None) -> tensor.Tensor:
+  """Registers a dataset with the tf.data service.
+
+  `register_dataset` registers a dataset with the tf.data service so that
+  datasets can be created later with
+  `tf.data.experimental.service.from_dataset_id`. This is useful when the
+  dataset is registered by one process, then used in another process. When the
+  same process is both registering and reading from the dataset, it is simpler
+  to use `tf.data.experimental.service.distribute` instead.
+
+  If the dataset is already registered with the tf.data service,
+  `register_dataset` returns the already-registered dataset's id.
+
+  >>> dispatcher = tf.data.experimental.service.DispatchServer()
+  >>> dispatcher_address = dispatcher.target.split("://")[1]
+  >>> worker = tf.data.experimental.service.WorkerServer(
+  ...     tf.data.experimental.service.WorkerConfig(
+  ...         dispatcher_address=dispatcher_address))
+  >>> dataset = tf.data.Dataset.range(10)
+  >>> dataset_id = tf.data.experimental.service.register_dataset(
+  ...     dispatcher.target, dataset)
+  >>> dataset = tf.data.experimental.service.from_dataset_id(
+  ...     processing_mode="parallel_epochs",
+  ...     service=dispatcher.target,
+  ...     dataset_id=dataset_id,
+  ...     element_spec=dataset.element_spec)
+  >>> print(list(dataset.as_numpy_iterator()))
+  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
<address>`, where `<address>
` identifies the dispatcher + address and `` can optionally be used to override the default + protocol to use. If it's a tuple, it should be (protocol, address). + dataset: A `tf.data.Dataset` to register with the tf.data service. + compression: (Optional.) How to compress the dataset's elements before + transferring them over the network. "AUTO" leaves the decision of how to + compress up to the tf.data service runtime. `None` indicates not to + compress. + dataset_id: (Optional.) By default, tf.data service generates a unique + (string) ID for each registered dataset. If a `dataset_id` is provided, it + will use the specified ID. If a dataset with a matching ID already exists, + no new dataset is registered. This is useful if multiple training jobs + want to (re)use the same dataset for training. In this case, they can + register the dataset with the same dataset ID. + + Returns: + A scalar string tensor representing the dataset ID. + """ + return _register_dataset(service, dataset, compression, dataset_id) + + +def _from_dataset_id(processing_mode, + service, + dataset_id, + element_spec, + job_name=None, + consumer_index=None, + num_consumers=None, + max_outstanding_requests=None, + task_refresh_interval_hint_ms=None, + data_transfer_protocol=None, + cross_trainer_cache=None, + target_workers="AUTO") -> dataset_ops.Dataset: + """Creates a dataset which reads data from the tf.data service. + + This transformation is similar to `from_dataset_id`, but supports additional + parameters which we do not yet want to add to the public Python API. + + Args: + processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying + how to shard the dataset among tf.data workers. See + `tf.data.experimental.service.ShardingPolicy` for details. For backwards + compatibility, `processing_mode` may also be set to the strings + `"parallel_epochs"` or `"distributed_epoch"`, which are respectively + equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`. + service: A string or a tuple indicating how to connect to the tf.data + service. If it's a string, it should be in the format + `[://]
<address>`, where `<address>
` identifies the dispatcher + address and `` can optionally be used to override the default + protocol to use. If it's a tuple, it should be (protocol, address). + dataset_id: The id of the dataset to read from. This id is returned by + `register_dataset` when the dataset is registered with the tf.data + service. + element_spec: A nested structure of `tf.TypeSpec`s representing the type of + elements produced by the dataset. This argument is only required inside a + tf.function. Use `tf.data.Dataset.element_spec` to get the element spec + for a given dataset. + job_name: (Optional.) The name of the job. If provided, it must be a + non-empty string or tensor. This argument makes it possible for multiple + datasets to share the same job. The default behavior is that the dataset + creates anonymous, exclusively owned jobs. + consumer_index: (Optional.) The index of the consumer in the range from `0` + to `num_consumers`. Must be specified alongside `num_consumers`. When + specified, consumers will read from the job in a strict round-robin order, + instead of the default first-come-first-served order. + num_consumers: (Optional.) The number of consumers which will consume from + the job. Must be specified alongside `consumer_index`. When specified, + consumers will read from the job in a strict round-robin order, instead of + the default first-come-first-served order. When `num_consumers` is + specified, the dataset must have infinite cardinality to prevent a + producer from running out of data early and causing consumers to go out of + sync. + max_outstanding_requests: (Optional.) A limit on how many elements may be + requested at the same time. You can use this option to control the amount + of memory used, since `distribute` won't use more than `element_size` * + `max_outstanding_requests` of memory. + task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the + dispatcher for task changes. + data_transfer_protocol: (Optional.) The protocol to use for transferring + data with the tf.data service. By default, data is transferred using gRPC. + cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is + provided, dataset iteration will be shared across concurrently running + trainers. See + https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers + for details. + target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data + runtime decides which workers to read from. If `"ANY"`, reads from any + tf.data service workers. If `"LOCAL"`, only reads from local in-processs + tf.data service workers. `"AUTO"` works well for most cases, while users + can specify other targets. For example, `"LOCAL"` helps avoid RPCs and + data copy if every TF worker colocates with a tf.data service worker. + Consumers of a shared job must use the same `target_workers`. Defaults to + `"AUTO"`. + + Returns: + A `tf.data.Dataset` which reads from the tf.data service. + """ + def _get_element_spec(): + """Fetches the element spec from the server.""" + data_service_metadata = None + dataset_id_val = tensor_util.constant_value(dataset_id) + try: + data_service_metadata = ( + _pywrap_server_lib.TF_DATA_GetDataServiceMetadataByID( + dataset_id_val, address, protocol + ) + ) + except NotImplementedError as err: + raise ValueError( + "The tf.data service is running an earlier version of TensorFlow " + "that requires specifying `element_spec` as an argument to " + "`from_dataset_id`. 
Please either supply an element spec or update " + "the tf.data service to the latest version.") from err + except RuntimeError: + # This error results from dataset ID not found. A more appropriate error + # will be raised when the dataset is created. + pass + + if not data_service_metadata or not data_service_metadata.element_spec: + dataset_id_val = tensor_util.constant_value(dataset_id) + raise ValueError( + f"Failed to fetch element spec for dataset id {dataset_id_val} from " + "tf.data service. If the dataset was registered in graph mode or " + "inside a tf.function, the `element_spec` must be specified as an " + "argument to `from_dataset_id`.") + + struct_pb = nested_structure_coder.struct_pb2.StructuredValue() + struct_pb.ParseFromString(data_service_metadata.element_spec) + return nested_structure_coder.decode_proto(struct_pb) + + processing_mode = _get_validated_sharding_policy(processing_mode) + if isinstance(service, tuple): + protocol, address = service + else: + protocol, address = _parse_service(service) + if job_name is not None: + if not isinstance(job_name, str) and not isinstance( + job_name, tensor.Tensor): + raise ValueError( + "`job_name` must be a string or Tensor, but `job_name` was of type " + f"{type(job_name)}. job_name={job_name}.") + + if not element_spec: + if not context.executing_eagerly(): + raise ValueError( + "In graph mode `element_spec` must be provided manually.") + element_spec = _get_element_spec() + + dataset = _DataServiceDataset( + dataset_id=dataset_id, + processing_mode=processing_mode, + address=address, + element_spec=element_spec, + protocol=protocol, + data_transfer_protocol=data_transfer_protocol, + job_name=job_name, + consumer_index=consumer_index, + num_consumers=num_consumers, + max_outstanding_requests=max_outstanding_requests, + task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, + cross_trainer_cache=cross_trainer_cache, + target_workers=target_workers) + + # Disable autosharding for shared jobs. + if job_name is not None: + options = options_lib.Options() + options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF + dataset = dataset.with_options(options) + return dataset + + +@tf_export("data.experimental.service.from_dataset_id") +def from_dataset_id(processing_mode, + service, + dataset_id, + element_spec=None, + job_name=None, + consumer_index=None, + num_consumers=None, + max_outstanding_requests=None, + data_transfer_protocol=None, + cross_trainer_cache=None, + target_workers="AUTO") -> dataset_ops.Dataset: + """Creates a dataset which reads data from the tf.data service. + + This is useful when the dataset is registered by one process, then used in + another process. When the same process is both registering and reading from + the dataset, it is simpler to use `tf.data.experimental.service.distribute` + instead. + + Before using `from_dataset_id`, the dataset must have been registered with the + tf.data service using `tf.data.experimental.service.register_dataset`. + `register_dataset` returns a dataset id for the registered dataset. That is + the `dataset_id` which should be passed to `from_dataset_id`. + + The `element_spec` argument indicates the `tf.TypeSpec`s for the elements + produced by the dataset. Currently `element_spec` must be explicitly + specified, and match the dataset registered under `dataset_id`. `element_spec` + defaults to `None` so that in the future we can support automatically + discovering the `element_spec` by querying the tf.data service. 
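+
+  For example, when `from_dataset_id` is called inside a `tf.function`, the
+  `element_spec` cannot be fetched from the service at tracing time and must
+  be passed in explicitly. A minimal sketch (it assumes a dispatcher and a
+  registered `dataset_id`, as in the example below):
+
+  ```python
+  spec = tf.TensorSpec(shape=(), dtype=tf.int64)  # spec of the registered dataset
+
+  @tf.function
+  def sum_from_service(dataset_id):
+    ds = tf.data.experimental.service.from_dataset_id(
+        processing_mode="parallel_epochs",
+        service=dispatcher.target,
+        dataset_id=dataset_id,
+        element_spec=spec)
+    return ds.reduce(tf.constant(0, tf.int64), lambda x, y: x + y)
+  ```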
+ + `tf.data.experimental.service.distribute` is a convenience method which + combines `register_dataset` and `from_dataset_id` into a dataset + transformation. + See the documentation for `tf.data.experimental.service.distribute` for more + detail about how `from_dataset_id` works. + + >>> dispatcher = tf.data.experimental.service.DispatchServer() + >>> dispatcher_address = dispatcher.target.split("://")[1] + >>> worker = tf.data.experimental.service.WorkerServer( + ... tf.data.experimental.service.WorkerConfig( + ... dispatcher_address=dispatcher_address)) + >>> dataset = tf.data.Dataset.range(10) + >>> dataset_id = tf.data.experimental.service.register_dataset( + ... dispatcher.target, dataset) + >>> dataset = tf.data.experimental.service.from_dataset_id( + ... processing_mode="parallel_epochs", + ... service=dispatcher.target, + ... dataset_id=dataset_id, + ... element_spec=dataset.element_spec) + >>> print(list(dataset.as_numpy_iterator())) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + Args: + processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying + how to shard the dataset among tf.data workers. See + `tf.data.experimental.service.ShardingPolicy` for details. For backwards + compatibility, `processing_mode` may also be set to the strings + `"parallel_epochs"` or `"distributed_epoch"`, which are respectively + equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`. + service: A string or a tuple indicating how to connect to the tf.data + service. If it's a string, it should be in the format + `[://]
<address>`, where `<address>
` identifies the dispatcher + address and `` can optionally be used to override the default + protocol to use. If it's a tuple, it should be (protocol, address). + dataset_id: The id of the dataset to read from. This id is returned by + `register_dataset` when the dataset is registered with the tf.data + service. + element_spec: A nested structure of `tf.TypeSpec`s representing the type of + elements produced by the dataset. This argument is only required inside a + tf.function. Use `tf.data.Dataset.element_spec` to get the element spec + for a given dataset. + job_name: (Optional.) The name of the job. If provided, it must be a + non-empty string. This argument makes it possible for multiple datasets to + share the same job. The default behavior is that the dataset creates + anonymous, exclusively owned jobs. + consumer_index: (Optional.) The index of the consumer in the range from `0` + to `num_consumers`. Must be specified alongside `num_consumers`. When + specified, consumers will read from the job in a strict round-robin order, + instead of the default first-come-first-served order. + num_consumers: (Optional.) The number of consumers which will consume from + the job. Must be specified alongside `consumer_index`. When specified, + consumers will read from the job in a strict round-robin order, instead of + the default first-come-first-served order. When `num_consumers` is + specified, the dataset must have infinite cardinality to prevent a + producer from running out of data early and causing consumers to go out of + sync. + max_outstanding_requests: (Optional.) A limit on how many elements may be + requested at the same time. You can use this option to control the amount + of memory used, since `distribute` won't use more than `element_size` * + `max_outstanding_requests` of memory. + data_transfer_protocol: (Optional.) The protocol to use for transferring + data with the tf.data service. By default, data is transferred using gRPC. + cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is + provided, dataset iteration will be shared across concurrently running + trainers. See + https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers + for details. + target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data + runtime decides which workers to read from. If `"ANY"`, reads from any + tf.data service workers. If `"LOCAL"`, only reads from local in-processs + tf.data service workers. `"AUTO"` works well for most cases, while users + can specify other targets. For example, `"LOCAL"` helps avoid RPCs and + data copy if every TF worker colocates with a tf.data service worker. + Consumers of a shared job must use the same `target_workers`. Defaults to + `"AUTO"`. + + Returns: + A `tf.data.Dataset` which reads from the tf.data service. 
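+
+  For example, a sketch of coordinated (round-robin) reads, assuming two
+  consumer trainers sharing one job over an infinite-cardinality registered
+  dataset (`dispatcher`, `dataset_id`, and `element_spec` as in the example
+  above; `trainer_index` is a placeholder for this consumer's index):
+
+  ```python
+  dataset = tf.data.experimental.service.from_dataset_id(
+      processing_mode="parallel_epochs",
+      service=dispatcher.target,
+      dataset_id=dataset_id,
+      element_spec=element_spec,
+      job_name="shared_job",
+      consumer_index=trainer_index,
+      num_consumers=2)
+  ```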
+ """ + _validate_job_name(job_name) + if job_name is not None: + job_name = string_ops.string_join( + ["dataset_id=", _to_string(dataset_id), job_name], "/") + + return _from_dataset_id( + processing_mode=processing_mode, + service=service, + dataset_id=dataset_id, + element_spec=element_spec, + job_name=job_name, + consumer_index=consumer_index, + num_consumers=num_consumers, + max_outstanding_requests=max_outstanding_requests, + data_transfer_protocol=data_transfer_protocol, + cross_trainer_cache=cross_trainer_cache, + target_workers=target_workers) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/error_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/error_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..263de71deb9b0a284a34acd65786c1980cc95b3c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/error_ops.py @@ -0,0 +1,51 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ignore_errors dataset transformations.""" +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.ignore_errors") +@deprecation.deprecated(None, "Use `tf.data.Dataset.ignore_errors` instead.") +def ignore_errors(log_warning=False): + """Creates a `Dataset` from another `Dataset` and silently ignores any errors. + + Use this transformation to produce a dataset that contains the same elements + as the input, but silently drops any elements that caused an error. For + example: + + ```python + dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.]) + + # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an + InvalidArgumentError. + dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error")) + + # Using `ignore_errors()` will drop the element that causes an error. + dataset = + dataset.apply(tf.data.experimental.ignore_errors()) # ==> {1., 0.5, 0.2} + ``` + Args: + log_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored + errors should be logged to stderr. Defaults to 'False'. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + def _apply_fn(dataset): + return dataset.ignore_errors(log_warning) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/get_single_element.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/get_single_element.py new file mode 100644 index 0000000000000000000000000000000000000000..453d51dd9ebb0a2e1becb0044f0e65c71b41b603 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/get_single_element.py @@ -0,0 +1,109 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python wrappers for Datasets and Iterators.""" +from tensorflow.python.types import data as data_types +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.get_single_element()`.") +@tf_export("data.experimental.get_single_element") +def get_single_element(dataset): + """Returns the single element of the `dataset` as a nested structure of tensors. + + The function enables you to use a `tf.data.Dataset` in a stateless + "tensor-in tensor-out" expression, without creating an iterator. + This facilitates the ease of data transformation on tensors using the + optimized `tf.data.Dataset` abstraction on top of them. + + For example, lets consider a `preprocessing_fn` which would take as an + input the raw features and returns the processed feature along with + it's label. + + ```python + def preprocessing_fn(raw_feature): + # ... the raw_feature is preprocessed as per the use-case + return feature + + raw_features = ... # input batch of BATCH_SIZE elements. + dataset = (tf.data.Dataset.from_tensor_slices(raw_features) + .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE) + .batch(BATCH_SIZE)) + + processed_features = tf.data.experimental.get_single_element(dataset) + ``` + + In the above example, the `raw_features` tensor of length=BATCH_SIZE + was converted to a `tf.data.Dataset`. Next, each of the `raw_feature` was + mapped using the `preprocessing_fn` and the processed features were + grouped into a single batch. The final `dataset` contains only one element + which is a batch of all the processed features. + + NOTE: The `dataset` should contain only one element. + + Now, instead of creating an iterator for the `dataset` and retrieving the + batch of features, the `tf.data.experimental.get_single_element()` function + is used to skip the iterator creation process and directly output the batch + of features. + + This can be particularly useful when your tensor transformations are + expressed as `tf.data.Dataset` operations, and you want to use those + transformations while serving your model. + + # Keras + + ```python + + model = ... # A pre-built or custom model + + class PreprocessingModel(tf.keras.Model): + def __init__(self, model): + super().__init__(self) + self.model = model + + @tf.function(input_signature=[...]) + def serving_fn(self, data): + ds = tf.data.Dataset.from_tensor_slices(data) + ds = ds.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE) + ds = ds.batch(batch_size=BATCH_SIZE) + return tf.argmax( + self.model(tf.data.experimental.get_single_element(ds)), + axis=-1 + ) + + preprocessing_model = PreprocessingModel(model) + your_exported_model_dir = ... # save the model to this path. 
+ tf.saved_model.save(preprocessing_model, your_exported_model_dir, + signatures={'serving_default': preprocessing_model.serving_fn}) + ``` + + Args: + dataset: A `tf.data.Dataset` object containing a single element. + + Returns: + A nested structure of `tf.Tensor` objects, corresponding to the single + element of `dataset`. + + Raises: + TypeError: if `dataset` is not a `tf.data.Dataset` object. + InvalidArgumentError: (at runtime) if `dataset` does not contain exactly + one element. + """ + if not isinstance(dataset, data_types.DatasetV2): + raise TypeError( + f"Invalid `dataset`. Expected a `tf.data.Dataset` object " + f"but got {type(dataset)}.") + + return dataset.get_single_element() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/grouping.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/grouping.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc0890fe442fc5fc0988e227b5fd1b49832d05a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/grouping.py @@ -0,0 +1,428 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Grouping dataset transformations.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import structured_function +from tensorflow.python.data.util import nest +from tensorflow.python.data.util import structure +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.group_by_reducer") +def group_by_reducer(key_func, reducer): + """A transformation that groups elements and performs a reduction. + + This transformation maps element of a dataset to a key using `key_func` and + groups the elements by key. The `reducer` is used to process each group; its + `init_func` is used to initialize state for each group when it is created, the + `reduce_func` is used to update the state every time an element is mapped to + the matching group, and the `finalize_func` is used to map the final state to + an output value. + + Args: + key_func: A function mapping a nested structure of tensors + (having shapes and types defined by `self.output_shapes` and + `self.output_types`) to a scalar `tf.int64` tensor. + reducer: An instance of `Reducer`, which captures the reduction logic using + the `init_func`, `reduce_func`, and `finalize_func` functions. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. 
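+
+  For example, a sketch that averages the values for each key (here, parity);
+  the names below are defined only for this illustration:
+
+  ```python
+  ds = tf.data.Dataset.range(10)
+  reducer = tf.data.experimental.Reducer(
+      init_func=lambda _: (0.0, 0.0),  # state = (sum, count) per key
+      reduce_func=lambda state, x: (state[0] + tf.cast(x, tf.float32),
+                                    state[1] + 1),
+      finalize_func=lambda s, n: s / n)
+  ds = ds.apply(tf.data.experimental.group_by_reducer(
+      key_func=lambda x: x % 2, reducer=reducer))
+  # ==> one element per key: 4.0 (mean of the evens), 5.0 (mean of the odds)
+  ```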
+ """ + + def _apply_fn(dataset): + """Function from `Dataset` to `Dataset` that applies the transformation.""" + return _GroupByReducerDataset(dataset, key_func, reducer) + + return _apply_fn + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.group_by_window(...)`.") +@tf_export("data.experimental.group_by_window") +def group_by_window(key_func, + reduce_func, + window_size=None, + window_size_func=None): + """A transformation that groups windows of elements by key and reduces them. + + This transformation maps each consecutive element in a dataset to a key + using `key_func` and groups the elements by key. It then applies + `reduce_func` to at most `window_size_func(key)` elements matching the same + key. All except the final window for each key will contain + `window_size_func(key)` elements; the final window may be smaller. + + You may provide either a constant `window_size` or a window size determined by + the key through `window_size_func`. + + Args: + key_func: A function mapping a nested structure of tensors + (having shapes and types defined by `self.output_shapes` and + `self.output_types`) to a scalar `tf.int64` tensor. + reduce_func: A function mapping a key and a dataset of up to `window_size` + consecutive elements matching that key to another dataset. + window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of + consecutive elements matching the same key to combine in a single + batch, which will be passed to `reduce_func`. Mutually exclusive with + `window_size_func`. + window_size_func: A function mapping a key to a `tf.int64` scalar + `tf.Tensor`, representing the number of consecutive elements matching + the same key to combine in a single batch, which will be passed to + `reduce_func`. Mutually exclusive with `window_size`. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + + Raises: + ValueError: if neither or both of {`window_size`, `window_size_func`} are + passed. + """ + + def _apply_fn(dataset): + """Function from `Dataset` to `Dataset` that applies the transformation.""" + return dataset.group_by_window( + key_func=key_func, + reduce_func=reduce_func, + window_size=window_size, + window_size_func=window_size_func) + + return _apply_fn + + +@deprecation.deprecated(None, + "Use `tf.data.Dataset.bucket_by_sequence_length(...)`.") +@tf_export("data.experimental.bucket_by_sequence_length") +def bucket_by_sequence_length(element_length_func, + bucket_boundaries, + bucket_batch_sizes, + padded_shapes=None, + padding_values=None, + pad_to_bucket_boundary=False, + no_padding=False, + drop_remainder=False): + """A transformation that buckets elements in a `Dataset` by length. + + Elements of the `Dataset` are grouped together by length and then are padded + and batched. + + This is useful for sequence tasks in which the elements have variable length. + Grouping together elements that have similar lengths reduces the total + fraction of padding in a batch which increases training step efficiency. + + Below is an example to bucketize the input data to the 3 buckets + "[0, 3), [3, 5), [5, inf)" based on sequence length, with batch size 2. + + >>> elements = [ + ... [0], [1, 2, 3, 4], [5, 6, 7], + ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]] + + >>> dataset = tf.data.Dataset.from_generator( + ... lambda: elements, tf.int64, output_shapes=[None]) + + >>> dataset = dataset.apply( + ... tf.data.experimental.bucket_by_sequence_length( + ... 
element_length_func=lambda elem: tf.shape(elem)[0], + ... bucket_boundaries=[3, 5], + ... bucket_batch_sizes=[2, 2, 2])) + + >>> for elem in dataset.as_numpy_iterator(): + ... print(elem) + [[1 2 3 4] + [5 6 7 0]] + [[ 7 8 9 10 11 0] + [13 14 15 16 19 20]] + [[ 0 0] + [21 22]] + + There is also a possibility to pad the dataset till the bucket boundary. + You can also provide which value to be used while padding the data. + Below example uses `-1` as padding and it also shows the input data + being bucketizied to two buckets "[0,3], [4,6]". + + >>> elements = [ + ... [0], [1, 2, 3, 4], [5, 6, 7], + ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]] + + >>> dataset = tf.data.Dataset.from_generator( + ... lambda: elements, tf.int32, output_shapes=[None]) + + >>> dataset = dataset.apply( + ... tf.data.experimental.bucket_by_sequence_length( + ... element_length_func=lambda elem: tf.shape(elem)[0], + ... bucket_boundaries=[4, 7], + ... bucket_batch_sizes=[2, 2, 2], + ... pad_to_bucket_boundary=True, + ... padding_values=-1)) + + >>> for elem in dataset.as_numpy_iterator(): + ... print(elem) + [[ 0 -1 -1] + [ 5 6 7]] + [[ 1 2 3 4 -1 -1] + [ 7 8 9 10 11 -1]] + [[21 22 -1]] + [[13 14 15 16 19 20]] + + When using `pad_to_bucket_boundary` option, it can be seen that it is + not always possible to maintain the bucket batch size. + You can drop the batches that do not maintain the bucket batch size by + using the option `drop_remainder`. Using the same input data as in the + above example you get the following result. + + >>> elements = [ + ... [0], [1, 2, 3, 4], [5, 6, 7], + ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]] + + >>> dataset = tf.data.Dataset.from_generator( + ... lambda: elements, tf.int32, output_shapes=[None]) + + >>> dataset = dataset.apply( + ... tf.data.experimental.bucket_by_sequence_length( + ... element_length_func=lambda elem: tf.shape(elem)[0], + ... bucket_boundaries=[4, 7], + ... bucket_batch_sizes=[2, 2, 2], + ... pad_to_bucket_boundary=True, + ... padding_values=-1, + ... drop_remainder=True)) + + >>> for elem in dataset.as_numpy_iterator(): + ... print(elem) + [[ 0 -1 -1] + [ 5 6 7]] + [[ 1 2 3 4 -1 -1] + [ 7 8 9 10 11 -1]] + + Args: + element_length_func: function from element in `Dataset` to `tf.int32`, + determines the length of the element, which will determine the bucket it + goes into. + bucket_boundaries: `list`, upper length boundaries of the buckets. + bucket_batch_sizes: `list`, batch size per bucket. Length should be + `len(bucket_boundaries) + 1`. + padded_shapes: Nested structure of `tf.TensorShape` to pass to + `tf.data.Dataset.padded_batch`. If not provided, will use + `dataset.output_shapes`, which will result in variable length dimensions + being padded out to the maximum length in each batch. + padding_values: Values to pad with, passed to + `tf.data.Dataset.padded_batch`. Defaults to padding with 0. + pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown + size to maximum length in batch. If `True`, will pad dimensions with + unknown size to bucket boundary minus 1 (i.e., the maximum length in each + bucket), and caller must ensure that the source `Dataset` does not contain + any elements with length longer than `max(bucket_boundaries)`. + no_padding: `bool`, indicates whether to pad the batch features (features + need to be either of type `tf.sparse.SparseTensor` or of same shape). + drop_remainder: (Optional.) 
A `tf.bool` scalar `tf.Tensor`, representing + whether the last batch should be dropped in the case it has fewer than + `batch_size` elements; the default behavior is not to drop the smaller + batch. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + + Raises: + ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`. + """ + + def _apply_fn(dataset): + return dataset.bucket_by_sequence_length( + element_length_func=element_length_func, + bucket_boundaries=bucket_boundaries, + bucket_batch_sizes=bucket_batch_sizes, + padded_shapes=padded_shapes, + padding_values=padding_values, + pad_to_bucket_boundary=pad_to_bucket_boundary, + no_padding=no_padding, + drop_remainder=drop_remainder) + + return _apply_fn + + +class _GroupByReducerDataset(dataset_ops.UnaryDataset): + """A `Dataset` that groups its input and performs a reduction.""" + + def __init__(self, input_dataset, key_func, reducer): + """See `group_by_reducer()` for details.""" + self._input_dataset = input_dataset + self._make_key_func(key_func, input_dataset) + self._make_init_func(reducer.init_func) + self._make_reduce_func(reducer.reduce_func, input_dataset) + self._make_finalize_func(reducer.finalize_func) + variant_tensor = ged_ops.experimental_group_by_reducer_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._key_func.function.captured_inputs, + self._init_func.function.captured_inputs, + self._reduce_func.function.captured_inputs, + self._finalize_func.function.captured_inputs, + key_func=self._key_func.function, + init_func=self._init_func.function, + reduce_func=self._reduce_func.function, + finalize_func=self._finalize_func.function, + **self._flat_structure) + super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor) + + def _make_key_func(self, key_func, input_dataset): + """Make wrapping defun for key_func.""" + self._key_func = structured_function.StructuredFunctionWrapper( + key_func, self._transformation_name(), dataset=input_dataset) + if not self._key_func.output_structure.is_compatible_with( + tensor_spec.TensorSpec([], dtypes.int64)): + raise ValueError( + f"Invalid `key_func`. Expected `key_func` to return a scalar " + f"tf.int64 tensor, but instead `key_func` has output " + f"types={self._key_func.output_types} " + f"and shapes={self._key_func.output_shapes}." + ) + + def _make_init_func(self, init_func): + """Make wrapping defun for init_func.""" + self._init_func = structured_function.StructuredFunctionWrapper( + init_func, + self._transformation_name(), + input_structure=tensor_spec.TensorSpec([], dtypes.int64)) + + def _make_reduce_func(self, reduce_func, input_dataset): + """Make wrapping defun for reduce_func.""" + + # Iteratively rerun the reduce function until reaching a fixed point on + # `self._state_structure`. + self._state_structure = self._init_func.output_structure + state_types = self._init_func.output_types + state_shapes = self._init_func.output_shapes + state_classes = self._init_func.output_classes + need_to_rerun = True + while need_to_rerun: + + wrapped_func = structured_function.StructuredFunctionWrapper( + reduce_func, + self._transformation_name(), + input_structure=(self._state_structure, input_dataset.element_spec), + add_to_graph=False) + + # Extract and validate class information from the returned values. 
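+      # Note: the classes and dtypes of the state must stay fixed across
+      # reruns; only the shapes may be weakened to a compatible, more
+      # general shape before the function is retraced.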
+      for new_state_class, state_class in zip(
+          nest.flatten(wrapped_func.output_classes),
+          nest.flatten(state_classes)):
+        if not issubclass(new_state_class, state_class):
+          raise TypeError(
+              f"Invalid `reducer`. The output class of the "
+              f"`reducer.reduce_func` {wrapped_func.output_classes}, "
+              f"does not match the class of the reduce state "
+              f"{state_classes}.")
+
+      # Extract and validate type information from the returned values.
+      for new_state_type, state_type in zip(
+          nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
+        if new_state_type != state_type:
+          raise TypeError(
+              f"Invalid `reducer`. The element types for the new state "
+              f"{wrapped_func.output_types} do not match the element types "
+              f"of the old state {self._init_func.output_types}."
+          )
+
+      # Extract shape information from the returned values.
+      flat_state_shapes = nest.flatten(state_shapes)
+      flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
+      weakened_state_shapes = [
+          original.most_specific_compatible_shape(new)
+          for original, new in zip(flat_state_shapes, flat_new_state_shapes)
+      ]
+
+      need_to_rerun = False
+      for original_shape, weakened_shape in zip(flat_state_shapes,
+                                                weakened_state_shapes):
+        if original_shape.ndims is not None and (
+            weakened_shape.ndims is None or
+            original_shape.as_list() != weakened_shape.as_list()):
+          need_to_rerun = True
+          break
+
+      if need_to_rerun:
+        state_shapes = nest.pack_sequence_as(
+            self._init_func.output_shapes, weakened_state_shapes)
+        self._state_structure = structure.convert_legacy_structure(
+            state_types, state_shapes, state_classes)
+
+    self._reduce_func = wrapped_func
+    self._reduce_func.function.add_to_graph(ops.get_default_graph())
+
+  def _make_finalize_func(self, finalize_func):
+    """Make wrapping defun for finalize_func."""
+    self._finalize_func = structured_function.StructuredFunctionWrapper(
+        finalize_func,
+        self._transformation_name(),
+        input_structure=self._state_structure)
+
+  @property
+  def element_spec(self):
+    return self._finalize_func.output_structure
+
+  def _functions(self):
+    return [
+        self._key_func, self._init_func, self._reduce_func, self._finalize_func
+    ]
+
+  def _transformation_name(self):
+    return "tf.data.experimental.group_by_reducer()"
+
+
+@tf_export("data.experimental.Reducer")
+class Reducer:
+  """A reducer is used for reducing a set of elements.
+ + A reducer is represented as a tuple of the three functions: + - init_func - to define initial value: key => initial state + - reducer_func - operation to perform on values with same key: (old state, input) => new state + - finalize_func - value to return in the end: state => result + + For example, + + ``` + def init_func(_): + return (0.0, 0.0) + + def reduce_func(state, value): + return (state[0] + value['features'], state[1] + 1) + + def finalize_func(s, n): + return s / n + + reducer = tf.data.experimental.Reducer(init_func, reduce_func, finalize_func) + ``` + """ + + def __init__(self, init_func, reduce_func, finalize_func): + self._init_func = init_func + self._reduce_func = reduce_func + self._finalize_func = finalize_func + + @property + def init_func(self): + return self._init_func + + @property + def reduce_func(self): + return self._reduce_func + + @property + def finalize_func(self): + return self._finalize_func diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/lookup_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/lookup_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..aef2902813eca123686a13119be2b29772d61fee --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/lookup_ops.py @@ -0,0 +1,238 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================== +"""Lookup operations.""" + +from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.ops import lookup_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.util.tf_export import tf_export + + +def _check_table_initializer_element_spec(element_spec): + """Raises an error if the given table initializer element spec is invalid.""" + base_error = ("Datasets used to initialize lookup tables must " + "produce elements in the form (key, value), where " + "the keys and values are scalar tensors. ") + specific_error = None + if len(element_spec) != 2: + raise ValueError(base_error + "However, the given dataset produces " + f"{len(element_spec)} components instead of two " + "(key, value) components. 
Full dataset element spec: " + f"{element_spec}.") + if not isinstance(element_spec[0], tensor.TensorSpec): + raise ValueError(base_error + "However, the given dataset produces " + f"non-Tensor keys of type {type(element_spec[0])}.") + if not isinstance(element_spec[1], tensor.TensorSpec): + raise ValueError(base_error + "However, the given dataset produces " + f"non-Tensor values of type {type(element_spec[1])}.") + if element_spec[0].shape.rank not in (None, 0): + raise ValueError( + base_error + "However, the given dataset produces " + f"non-scalar key Tensors of rank {element_spec[0].shape.rank}.") + if element_spec[1].shape.rank not in (None, 0): + raise ValueError( + base_error + "However, the given dataset produces " + f"non-scalar value Tensors of rank {element_spec[1].shape.rank}.") + + +@tf_export("data.experimental.DatasetInitializer") +class DatasetInitializer(lookup_ops.TableInitializerBase): + """Creates a table initializer from a `tf.data.Dataset`. + + Sample usage: + + >>> keys = tf.data.Dataset.range(100) + >>> values = tf.data.Dataset.range(100).map( + ... lambda x: tf.strings.as_string(x * 2)) + >>> ds = tf.data.Dataset.zip((keys, values)) + >>> init = tf.data.experimental.DatasetInitializer(ds) + >>> table = tf.lookup.StaticHashTable(init, "") + >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy() + array([b'0', b'2', b'4'], dtype=object) + + Attributes: + dataset: A `tf.data.Dataset` object that produces tuples of scalars. The + first scalar is treated as a key and the second as value. + Raises: ValueError if `dataset` doesn't conform to specifications. + """ + + def __init__(self, dataset): + """Creates a table initializer from a `tf.data.Dataset`. + + Args: + dataset: A `tf.data.Dataset` object that produces tuples of scalars. The + first scalar is treated as a key and the second as value. + Raises: ValueError if `dataset` doesn't conform to specifications. + Returns: A `DatasetInitializer` object + """ + # Assert that the dataset element spec is a tuple of TensorSpecs where + # each tensor is a scalar. + self.dataset = dataset + elem_spec = self.dataset.element_spec + _check_table_initializer_element_spec(elem_spec) + + key_type = elem_spec[0].dtype + value_type = elem_spec[1].dtype + super(DatasetInitializer, self).__init__(key_type, value_type) + + def initialize(self, table): + lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype) + init_op = ged_ops.initialize_table_from_dataset( + table.resource_handle, self.dataset._variant_tensor) # pylint: disable=protected-access + ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) + return init_op + + +@tf_export("data.experimental.table_from_dataset") +def table_from_dataset(dataset=None, + num_oov_buckets=0, + vocab_size=None, + default_value=None, + hasher_spec=lookup_ops.FastHashSpec, + key_dtype=dtypes.string, + name=None): + """Returns a lookup table based on the given dataset. + + This operation constructs a lookup table based on the given dataset of pairs + of (key, value). + + Any lookup of an out-of-vocabulary token will return a bucket ID based on its + hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the + `default_value`. + The bucket ID range is + `[vocabulary size, vocabulary size + num_oov_buckets - 1]`. + + Sample Usages: + + >>> keys = tf.data.Dataset.range(100) + >>> values = tf.data.Dataset.range(100).map( + ... 
lambda x: tf.strings.as_string(x * 2)) + >>> ds = tf.data.Dataset.zip((keys, values)) + >>> table = tf.data.experimental.table_from_dataset( + ... ds, default_value='n/a', key_dtype=tf.int64) + >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy() + array([b'0', b'2', b'4'], dtype=object) + + Args: + dataset: A dataset containing (key, value) pairs. + num_oov_buckets: The number of out-of-vocabulary buckets. + vocab_size: Number of the elements in the vocabulary, if known. + default_value: The value to use for out-of-vocabulary feature values. + Defaults to -1. + hasher_spec: A `HasherSpec` to specify the hash function to use for + assignation of out-of-vocabulary buckets. + key_dtype: The `key` data type. + name: A name for this op (optional). + + Returns: + The lookup table based on the given dataset. + + Raises: + ValueError: If + * `dataset` does not contain pairs + * The 2nd item in the `dataset` pairs has a dtype which is incompatible + with `default_value` + * `num_oov_buckets` is negative + * `vocab_size` is not greater than zero + * The `key_dtype` is not integer or string + """ + elem_spec = dataset.element_spec + _check_table_initializer_element_spec(elem_spec) + if default_value is None: + default_value = -1 + if not (elem_spec[1].dtype.is_integer or elem_spec[1].dtype.is_floating): + raise ValueError("`default_value` must be specified when creating a " + "table from a dataset that produces values of type " + f"{elem_spec[1].dtype}.") + if num_oov_buckets < 0: + raise ValueError("`num_oov_buckets` must be greater than or equal to 0, " + f"got {num_oov_buckets}.") + if (not isinstance(vocab_size, tensor.Tensor) and vocab_size is not None and + vocab_size < 1): + raise ValueError(f"`vocab_size` must be greater than 0, got {vocab_size}.") + if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype): + raise TypeError("`key_dtype` must be either an integer or string type, " + f"but got {key_dtype}") + if vocab_size is not None: + if isinstance(vocab_size, tensor.Tensor): + vocab_size = math_ops.cast(vocab_size, dtypes.int64) + dataset = dataset.take(vocab_size) + dataset = dataset.apply(assert_cardinality(vocab_size)) + with ops.name_scope(name, "string_to_index"): + initializer = DatasetInitializer(dataset) + with ops.name_scope(None, "hash_table"): + table = lookup_ops.StaticHashTableV1(initializer, default_value) + if num_oov_buckets: + table = lookup_ops.IdTableWithHashBuckets( + table, + num_oov_buckets=num_oov_buckets, + hasher_spec=hasher_spec, + key_dtype=key_dtype) + return table + + +@tf_export("data.experimental.index_table_from_dataset") +def index_table_from_dataset(dataset=None, + num_oov_buckets=0, + vocab_size=None, + default_value=-1, + hasher_spec=lookup_ops.FastHashSpec, + key_dtype=dtypes.string, + name=None): + """Returns an index lookup table based on the given dataset. + + This operation constructs a lookup table based on the given dataset of keys. + + Any lookup of an out-of-vocabulary token will return a bucket ID based on its + hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the + `default_value`. + The bucket ID range is + `[vocabulary size, vocabulary size + num_oov_buckets - 1]`. + + Sample Usages: + + >>> ds = tf.data.Dataset.range(100).map(lambda x: tf.strings.as_string(x * 2)) + >>> table = tf.data.experimental.index_table_from_dataset( + ... ds, key_dtype=dtypes.int64) + >>> table.lookup(tf.constant(['0', '2', '4'], dtype=tf.string)).numpy() + array([0, 1, 2]) + + Args: + dataset: A dataset of keys. 
+ num_oov_buckets: The number of out-of-vocabulary buckets. + vocab_size: Number of the elements in the vocabulary, if known. + default_value: The value to use for out-of-vocabulary feature values. + Defaults to -1. + hasher_spec: A `HasherSpec` to specify the hash function to use for + assignation of out-of-vocabulary buckets. + key_dtype: The `key` data type. + name: A name for this op (optional). + + Returns: + The lookup table based on the given dataset. + + Raises: + ValueError: If + * `num_oov_buckets` is negative + * `vocab_size` is not greater than zero + * The `key_dtype` is not integer or string + """ + return table_from_dataset(dataset.enumerate().map(lambda v, k: (k, v)), + num_oov_buckets, vocab_size, default_value, + hasher_spec, key_dtype, name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/map_defun.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/map_defun.py new file mode 100644 index 0000000000000000000000000000000000000000..86848b507aa9c61a47c876f0bd6d83afbfdfdac2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/map_defun.py @@ -0,0 +1,65 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental API for optimizing `tf.data` pipelines.""" + +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import gen_dataset_ops + + +def map_defun(fn, + elems, + output_dtypes, + output_shapes, + max_intra_op_parallelism=1): + """Map a function on the list of tensors unpacked from `elems` on dimension 0. + + Args: + fn: A function (`function.defun`) that takes a list of tensors and returns + another list of tensors. The output list has the same types as + output_dtypes. The elements of the output list have the same dimension 0 + as `elems`, and the remaining dimensions correspond to those of + `fn_output_shapes`. + elems: A list of tensors. + output_dtypes: A list of dtypes corresponding to the output types of the + function. + output_shapes: A list of `TensorShape`s corresponding to the output shapes + from each invocation of the function on slices of inputs. + max_intra_op_parallelism: An integer. If positive, sets the max parallelism + limit of each function call to this. + + Raises: + ValueError: if any of the inputs are malformed. + + Returns: + A list of `Tensor` objects with the same types as `output_dtypes`. 
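+
+  A rough usage sketch (`map_defun` is an internal helper, so this mirrors
+  its tests rather than a public API; the names below are illustrative):
+
+  ```python
+  @tf.function(input_signature=[tf.TensorSpec([2], tf.int32)])
+  def double(x):
+    return x * 2
+
+  elems = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)
+  out = map_defun(double, [elems], [tf.int32], [(2,)])[0]
+  # out == [[2, 4], [6, 8]]
+  ```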
+ """ + if not isinstance(elems, list): + raise ValueError(f"`elems` must be a list of tensors, but was {elems}.") + if not isinstance(output_dtypes, list): + raise ValueError("`output_dtypes` must be a list of `tf.DType` objects, " + f"but was {output_dtypes}.") + if not isinstance(output_shapes, list): + raise ValueError("`output_shapes` must be a list of `tf.TensorShape` " + f"objects, but was {output_shapes}.") + + concrete_fn = fn.get_concrete_function() # pylint: disable=protected-access + # TODO(shivaniagrawal/rachelim): what about functions created without + # input_signature. + elems = [ops.convert_to_tensor(e) for e in elems] + output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes] + return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs, + output_dtypes, output_shapes, concrete_fn, + max_intra_op_parallelism) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/matching_files.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/matching_files.py new file mode 100644 index 0000000000000000000000000000000000000000..deb934126cd5c982b03ed3c3118681074db2edc6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/matching_files.py @@ -0,0 +1,35 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental API for matching input filenames.""" + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops + + +class MatchingFilesDataset(dataset_ops.DatasetSource): + """A `Dataset` that list the files according to the input patterns.""" + + def __init__(self, patterns): + self._patterns = ops.convert_to_tensor( + patterns, dtype=dtypes.string, name="patterns") + variant_tensor = ged_ops.matching_files_dataset(self._patterns) + super(MatchingFilesDataset, self).__init__(variant_tensor) + + @property + def element_spec(self): + return tensor_spec.TensorSpec([], dtypes.string) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/scan_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/scan_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..85eba7551a75f8dd5e2b620f4a1be22dd7b60c8e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/scan_ops.py @@ -0,0 +1,45 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Scan dataset transformation.""" +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.scan(...) instead") +@tf_export("data.experimental.scan") +def scan(initial_state, scan_func): + """A transformation that scans a function across an input dataset. + + This transformation is a stateful relative of `tf.data.Dataset.map`. + In addition to mapping `scan_func` across the elements of the input dataset, + `scan()` accumulates one or more state tensors, whose initial values are + `initial_state`. + + Args: + initial_state: A nested structure of tensors, representing the initial state + of the accumulator. + scan_func: A function that maps `(old_state, input_element)` to + `(new_state, output_element)`. It must take two arguments and return a + pair of nested structures of tensors. The `new_state` must match the + structure of `initial_state`. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + def _apply_fn(dataset): + return dataset.scan(initial_state=initial_state, scan_func=scan_func) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/snapshot.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/snapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..204e6b9c29010a41fc990f5b64bc47d475886448 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/snapshot.py @@ -0,0 +1,276 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Dataset snapshot and related functionality.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + +COMPRESSION_GZIP = "GZIP" +COMPRESSION_SNAPPY = "SNAPPY" +COMPRESSION_NONE = None + + +class _LegacySnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A Dataset that captures a snapshot or reads from a snapshot.""" + + def __init__(self, + input_dataset, + path, + compression=None, + reader_path_prefix=None, + writer_path_prefix=None, + shard_size_bytes=None, + pending_snapshot_expiry_seconds=None, + num_reader_threads=None, + reader_buffer_size=None, + num_writer_threads=None, + writer_buffer_size=None, + shuffle_on_read=None, + shuffle_seed=None, + mode=None, + snapshot_name=None): + + self._compression = compression if compression is not None else "" + self._reader_path_prefix = ( + reader_path_prefix if reader_path_prefix is not None else "") + self._writer_path_prefix = ( + writer_path_prefix if writer_path_prefix is not None else "") + self._shard_size_bytes = ( + shard_size_bytes if shard_size_bytes is not None else -1) + self._pending_snapshot_expiry_seconds = ( + pending_snapshot_expiry_seconds + if pending_snapshot_expiry_seconds is not None else -1) + self._num_reader_threads = ( + num_reader_threads if num_reader_threads is not None else -1) + self._reader_buffer_size = ( + reader_buffer_size if reader_buffer_size is not None else -1) + self._num_writer_threads = ( + num_writer_threads if num_writer_threads is not None else -1) + self._writer_buffer_size = ( + writer_buffer_size if writer_buffer_size is not None else -1) + self._shuffle_on_read = ( + shuffle_on_read if shuffle_on_read is not None else False) + self._mode = (mode if mode is not None else "auto") + self._snapshot_name = (snapshot_name if snapshot_name is not None else "") + + self._seed, self._seed2 = random_seed.get_seed(shuffle_seed) + + self._input_dataset = input_dataset + self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path") + + variant_tensor = ged_ops.snapshot_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + path=self._path, + compression=self._compression, + reader_path_prefix=self._reader_path_prefix, + writer_path_prefix=self._writer_path_prefix, + shard_size_bytes=self._shard_size_bytes, + pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds, + num_reader_threads=self._num_reader_threads, + reader_buffer_size=self._reader_buffer_size, + num_writer_threads=self._num_writer_threads, + writer_buffer_size=self._writer_buffer_size, + shuffle_on_read=self._shuffle_on_read, + seed=self._seed, + seed2=self._seed2, + mode=self._mode, + snapshot_name=self._snapshot_name, + **self._flat_structure) + + super(_LegacySnapshotDataset, self).__init__(input_dataset, variant_tensor) + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.shapshot(...)` instead.") +def legacy_snapshot(path, + compression=None, + reader_path_prefix=None, + writer_path_prefix=None, + shard_size_bytes=None, + pending_snapshot_expiry_seconds=None, + num_reader_threads=None, + reader_buffer_size=None, + num_writer_threads=None, + 
writer_buffer_size=None, + shuffle_on_read=None, + shuffle_seed=None, + mode=None, + snapshot_name=None): + """Writes to/reads from a snapshot of a dataset. + + This function attempts to determine whether a valid snapshot exists at the + `path`, and reads from the snapshot if so. If not, it will run the + preprocessing pipeline as usual, and write out a snapshot of the data + processed for future use. + + Args: + path: A directory where we want to save our snapshots and/or read from a + previously saved snapshot. + compression: The type of compression to apply to the Dataset. Currently + supports "GZIP" or None. Defaults to None (no compression). + reader_path_prefix: A prefix to add to the path when reading from snapshots. + Defaults to None. + writer_path_prefix: A prefix to add to the path when writing to snapshots. + Defaults to None. + shard_size_bytes: The size of each shard to be written by the snapshot + dataset op. Defaults to 10 GiB. + pending_snapshot_expiry_seconds: How long to wait (in seconds) before the + snapshot op considers a previously unfinished snapshot to be stale. + num_reader_threads: Number of threads to parallelize reading from the + snapshot. Especially useful if compression is turned on since the + decompression operation tends to be intensive. Defaults to 1. If > 1, this + might introduce non-determinism, i.e. the order in which elements are read + from the snapshot may differ from the order in which they were written. + reader_buffer_size: Maximum number of elements we can prefetch reading from + the snapshot. Defaults to 1. Increasing this might improve performance but + will increase memory consumption. + num_writer_threads: Number of threads to parallelize writing to the + snapshot. We'll open up `num_writer_threads` files and write to them in + parallel. Especially useful if compression is turned on since the + compression operation tends to be intensive. Defaults to 1. If > 1, this + might introduce non-determinism, i.e. the order in which elements are read + from the upstream iterator may differ from the order in which they are + written. + writer_buffer_size: Maximum number of pipeline elements to fill up the + buffer before writing them out using `num_writer_threads`. + shuffle_on_read: If this is True, then the order in which examples are + produced when reading from a snapshot will be random. Defaults to False. + shuffle_seed: Optional. If shuffle_seed is set, the random number generator + used for shuffling (when shuffle_on_read is turned on) is seeded by the + given seed. Otherwise, it is seeded by a random seed that differs for + every run. + mode: The mode in which snapshot should operate. Valid options are "auto", + "read", "write", and "passthrough". The default mode is "auto", where the + snapshot op will automatically determine what mode to operate in. + snapshot_name: If set, use the supplied string as the snapshot name instead + of introspecting the data pipeline and automatically generating a unique + identifier for the snapshot. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`.
+ """ + + def _apply_fn(dataset): + return _LegacySnapshotDataset( + input_dataset=dataset, + path=path, + compression=compression, + reader_path_prefix=reader_path_prefix, + writer_path_prefix=writer_path_prefix, + shard_size_bytes=shard_size_bytes, + pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds, + num_reader_threads=num_reader_threads, + reader_buffer_size=reader_buffer_size, + num_writer_threads=num_writer_threads, + writer_buffer_size=writer_buffer_size, + shuffle_on_read=shuffle_on_read, + shuffle_seed=shuffle_seed, + mode=mode, + snapshot_name=snapshot_name) + + return _apply_fn + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.snapshot(...)`.") +@tf_export("data.experimental.snapshot") +def snapshot(path, compression="AUTO", reader_func=None, shard_func=None): + """API to persist the output of the input dataset. + + The snapshot API allows users to transparently persist the output of their + preprocessing pipeline to disk, and materialize the pre-processed data on a + different training run. + + This API enables repeated preprocessing steps to be consolidated, and allows + re-use of already processed data, trading off disk storage and network + bandwidth for freeing up more valuable CPU resources and accelerator compute + time. + + https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md + has detailed design documentation of this feature. + + Users can specify various options to control the behavior of snapshot, + including how snapshots are read from and written to by passing in + user-defined functions to the `reader_func` and `shard_func` parameters. + + `shard_func` is a user specified function that maps input elements to snapshot + shards. + + Users may want to specify this function to control how snapshot files should + be written to disk. Below is an example of how a potential shard_func could + be written. + + ```python + dataset = ... + dataset = dataset.enumerate() + dataset = dataset.apply(tf.data.Dataset.shapshot("/path/to/snapshot/dir", + shard_func=lambda x, y: x % NUM_SHARDS, ...)) + dataset = dataset.map(lambda x, y: y) + ``` + + `reader_func` is a user specified function that accepts a single argument: + (1) a Dataset of Datasets, each representing a "split" of elements of the + original dataset. The cardinality of the input dataset matches the + number of the shards specified in the `shard_func` (see above). The function + should return a Dataset of elements of the original dataset. + + Users may want specify this function to control how snapshot files should be + read from disk, including the amount of shuffling and parallelism. + + Here is an example of a standard reader function a user can define. This + function enables both dataset shuffling and parallel reading of datasets: + + ```python + def user_reader_func(datasets): + # shuffle the datasets splits + datasets = datasets.shuffle(NUM_CORES) + # read datasets in parallel and interleave their elements + return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE) + + dataset = dataset.apply(tf.data.Dataset.shapshot("/path/to/snapshot/dir", + reader_func=user_reader_func)) + ``` + + By default, snapshot parallelizes reads by the number of cores available on + the system, but will not attempt to shuffle the data. + + Args: + path: Required. A directory to use for storing / loading the snapshot to / + from. + compression: Optional. The type of compression to apply to the snapshot + written to disk. 
Supported options are `GZIP`, `SNAPPY`, `AUTO`, or None. + Defaults to AUTO, which attempts to pick an appropriate compression + algorithm for the dataset. + reader_func: Optional. A function to control how to read data from snapshot + shards. + shard_func: Optional. A function to control how to shard data when writing a + snapshot. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + """Actual dataset transformation.""" + return dataset.snapshot( + path=path, + compression=compression, + reader_func=reader_func, + shard_func=shard_func) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/unique.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/unique.py new file mode 100644 index 0000000000000000000000000000000000000000..a028e8b87daae7990a0067e40ed37034eb192547 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/unique.py @@ -0,0 +1,43 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Unique element dataset transformations.""" +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.unique(...)` instead.") +@tf_export("data.experimental.unique") +def unique(): + """Creates a `Dataset` from another `Dataset`, discarding duplicates. + + Use this transformation to produce a dataset that contains one instance of + each unique element in the input. For example: + + ```python + dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1]) + + # Using `unique()` will drop the duplicate elements. + dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 } + ``` + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return dataset.unique() + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/writers.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/writers.py new file mode 100644 index 0000000000000000000000000000000000000000..7a77d7029130c4e12115d0b9110fdb90a5955cab --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/writers.py @@ -0,0 +1,126 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python wrappers for tf.data writers.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.util import convert +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.types import data as data_types +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.TFRecordWriter") +@deprecation.deprecated( + None, "To write TFRecords to disk, use `tf.io.TFRecordWriter`. To save " + "and load the contents of a dataset, use `tf.data.experimental.save` " + "and `tf.data.experimental.load`") +class TFRecordWriter: + """Writes a dataset to a TFRecord file. + + The elements of the dataset must be scalar strings. To serialize dataset + elements as strings, you can use the `tf.io.serialize_tensor` function. + + ```python + dataset = tf.data.Dataset.range(3) + dataset = dataset.map(tf.io.serialize_tensor) + writer = tf.data.experimental.TFRecordWriter("/path/to/file.tfrecord") + writer.write(dataset) + ``` + + To read back the elements, use `TFRecordDataset`. + + ```python + dataset = tf.data.TFRecordDataset("/path/to/file.tfrecord") + dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.int64)) + ``` + + To shard a `dataset` across multiple TFRecord files: + + ```python + dataset = ... # dataset to be written + + def reduce_func(key, dataset): + filename = tf.strings.join([PATH_PREFIX, tf.strings.as_string(key)]) + writer = tf.data.experimental.TFRecordWriter(filename) + writer.write(dataset.map(lambda _, x: x)) + return tf.data.Dataset.from_tensors(filename) + + dataset = dataset.enumerate() + dataset = dataset.apply(tf.data.experimental.group_by_window( + lambda i, _: i % NUM_SHARDS, reduce_func, tf.int64.max + )) + + # Iterate through the dataset to trigger data writing. + for _ in dataset: + pass + ``` + """ + + def __init__(self, filename, compression_type=None): + """Initializes a `TFRecordWriter`. + + Args: + filename: a string path indicating where to write the TFRecord data. + compression_type: (Optional.) a string indicating what type of compression + to use when writing the file. See `tf.io.TFRecordCompressionType` for + what types of compression are available. Defaults to `None`. + """ + self._filename = ops.convert_to_tensor( + filename, dtypes.string, name="filename") + self._compression_type = convert.optional_param_to_tensor( + "compression_type", + compression_type, + argument_default="", + argument_dtype=dtypes.string) + + def write(self, dataset): + """Writes a dataset to a TFRecord file. + + An operation that writes the content of the specified dataset to the file + specified in the constructor. + + If the file exists, it will be overwritten. 
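+ + For example (a minimal sketch; the file path is illustrative): + + ```python + dataset = tf.data.Dataset.from_tensor_slices(["a", "b", "c"]) + writer = tf.data.experimental.TFRecordWriter("/tmp/data.tfrecord") + writer.write(dataset) # In eager mode, this writes the file immediately. + ```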
+ + Args: + dataset: a `tf.data.Dataset` whose elements are to be written to a file. + + Returns: + In graph mode, this returns an operation which, when executed, performs the + write. In eager mode, the write is performed by the method itself and + there is no return value. + + Raises: + TypeError: if `dataset` is not a `tf.data.Dataset`. + TypeError: if the elements produced by the dataset are not scalar strings. + """ + if not isinstance(dataset, data_types.DatasetV2): + raise TypeError( + f"Invalid `dataset`. Expected a `tf.data.Dataset` object but got " + f"{type(dataset)}." + ) + if not dataset_ops.get_structure(dataset).is_compatible_with( + tensor_spec.TensorSpec([], dtypes.string)): + raise TypeError( + f"Invalid `dataset`. Expected a `dataset` that produces scalar " + f"`tf.string` elements, but got a dataset which produces elements " + f"with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and " + f"types {dataset_ops.get_legacy_output_types(dataset)}.") + # pylint: disable=protected-access + dataset = dataset._apply_debug_options() + return gen_experimental_dataset_ops.dataset_to_tf_record( + dataset._variant_tensor, self._filename, self._compression_type) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a3f3ce8e9d23fb0f4d839b0af55e31afec0ffcd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__init__.py @@ -0,0 +1,426 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""API for using the tf.data service. + +This module contains: + +1. tf.data server implementations for running the tf.data service. +2. APIs for registering datasets with the tf.data service and reading from + the registered datasets. + +The tf.data service provides the following benefits: + +- Horizontal scaling of tf.data input pipeline processing to solve input + bottlenecks. +- Data coordination for distributed training. Coordinated reads + enable all replicas to train on similar-length examples across each global + training step, improving step times in synchronous training. +- Dynamic balancing of data across training replicas. + +>>> dispatcher = tf.data.experimental.service.DispatchServer() +>>> dispatcher_address = dispatcher.target.split("://")[1] +>>> worker = tf.data.experimental.service.WorkerServer( +... tf.data.experimental.service.WorkerConfig( +... dispatcher_address=dispatcher_address)) +>>> dataset = tf.data.Dataset.range(10) +>>> dataset = dataset.apply(tf.data.experimental.service.distribute( +... processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, +...
service=dispatcher.target)) +>>> print(list(dataset.as_numpy_iterator())) +[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + +## Setup + +This section goes over how to set up the tf.data service. + +### Run tf.data servers + +The tf.data service consists of one dispatch server and `n` worker servers. +tf.data servers should be brought up alongside your training jobs, then brought +down when the jobs are finished. +Use `tf.data.experimental.service.DispatchServer` to start a dispatch server, +and `tf.data.experimental.service.WorkerServer` to start worker servers. Servers +can be run in the same process for testing purposes, or scaled up on separate +machines. + +See https://github.com/tensorflow/ecosystem/tree/master/data_service for an +example of using Google Kubernetes Engine (GKE) to manage the tf.data service. +Note that the server implementation in +[tf_std_data_server.py](https://github.com/tensorflow/ecosystem/blob/master/data_service/tf_std_data_server.py) +is not GKE-specific, and can be used to run the tf.data service in other +contexts. + +### Custom ops + +If your dataset uses custom ops, these ops need to be made available to tf.data +servers by calling +[load_op_library](https://www.tensorflow.org/api_docs/python/tf/load_op_library) +from the dispatcher and worker processes at startup. + +## Usage + +Users interact with tf.data service by programmatically registering their +datasets with tf.data service, then creating datasets that read from the +registered datasets. The +[register_dataset](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/register_dataset) +function registers a dataset, then the +[from_dataset_id](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/from_dataset_id) +function creates a new dataset which reads from the registered dataset. +The +[distribute](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/distribute) +function wraps `register_dataset` and `from_dataset_id` into a single convenient +transformation which registers its input dataset and then reads from it. +`distribute` enables tf.data service to be used with a one-line code change. +However, it assumes that the dataset is created and consumed by the same entity +and this assumption might not always be valid or desirable. In particular, in +certain scenarios, such as distributed training, it might be desirable to +decouple the creation and consumption of the dataset (via `register_dataset` +and `from_dataset_id` respectively) to avoid having to create the dataset on +each of the training workers. + +### Example + +#### `distribute` + +To use the `distribute` transformation, apply the transformation after the +prefix of your input pipeline that you would like to be executed using tf.data +service (typically at the end). + +``` +dataset = ... # Define your dataset here. +# Move dataset processing from the local machine to the tf.data service +dataset = dataset.apply( + tf.data.experimental.service.distribute( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + job_name="shared_job")) +# Any transformations added after `distribute` will be run on the local machine. +dataset = dataset.prefetch(1) +``` + +The above code will create a tf.data service "job", which iterates through the +dataset to generate data. To share the data from a job across multiple clients +(e.g. when using TPUStrategy or MultiWorkerMirroredStrategy), set a common +`job_name` across all clients. 
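+ +As a minimal sketch (assuming `FLAGS.tf_data_service_address` points at a +running dispatcher; `make_dataset` is a hypothetical helper that builds the +input pipeline), every client runs the same code below, and because the clients +share a `job_name`, they split one job's output between them on a first-come, +first-served basis rather than each receiving a full copy of the data: + +``` +dataset = make_dataset() +dataset = dataset.apply( + tf.data.experimental.service.distribute( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + job_name="shared_job")) +```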
+ +#### `register_dataset` and `from_dataset_id` + +`register_dataset` registers a dataset with the tf.data service, returning a +dataset id for the registered dataset. `from_dataset_id` creates a dataset that +reads from the registered dataset. These APIs can be used to reduce dataset +building time for distributed training. Instead of building the dataset on all +training workers, we can build the dataset just once and then register the +dataset using `register_dataset`. Then all workers can call `from_dataset_id` +without needing to build the dataset themselves. + +``` +dataset = ... # Define your dataset here. +dataset_id = tf.data.experimental.service.register_dataset( + service=FLAGS.tf_data_service_address, + dataset=dataset) +# Use `from_dataset_id` to create per-worker datasets. +per_worker_datasets = {} +for worker in workers: + per_worker_datasets[worker] = tf.data.experimental.service.from_dataset_id( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + dataset_id=dataset_id, + job_name="shared_job") +``` + +### Processing Modes + +`processing_mode` specifies how to shard a dataset among tf.data service +workers. tf.data service supports `OFF`, `DYNAMIC`, `FILE`, `DATA`, +`FILE_OR_DATA`, `HINT` sharding policies. + +OFF: No sharding will be performed. The entire input dataset will be processed +independently by each of the tf.data service workers. For this reason, it is +important to shuffle data (e.g. filenames) non-deterministically, so that each +worker will process the elements of the dataset in a different order. This mode +can be used to distribute datasets that aren't splittable. + +If a worker is added or restarted during ShardingPolicy.OFF processing, the +worker will instantiate a new copy of the dataset and begin producing data from +the beginning. + +#### Dynamic Sharding + +DYNAMIC: In this mode, tf.data service divides the dataset into two components: +a source component that generates "splits" such as filenames, and a processing +component that takes splits and outputs dataset elements. The source component +is executed in a centralized fashion by the tf.data service dispatcher, which +generates different splits of input data. The processing component is executed +in a parallel fashion by the tf.data service workers, each operating on a +different set of input data splits. + +For example, consider the following dataset: + +``` +dataset = tf.data.Dataset.from_tensor_slices(filenames) +dataset = dataset.interleave(TFRecordDataset) +dataset = dataset.map(preprocess_fn) +dataset = dataset.batch(batch_size) +dataset = dataset.apply( + tf.data.experimental.service.distribute( + processing_mode=tf.data.experimental.service.ShardingPolicy.DYNAMIC, + ...)) +``` + +The `from_tensor_slices` will be run on the dispatcher, while the `interleave`, +`map`, and `batch` will be run on tf.data service workers. The workers will pull +filenames from the dispatcher for processing. To process a dataset with +dynamic sharding, the dataset must have a splittable source, and all of +its transformations must be compatible with splitting. While most sources and +transformations support splitting, there are exceptions, such as custom datasets +which may not implement the splitting API. Please file a Github issue if you +would like to use distributed epoch processing for a currently unsupported +dataset source or transformation. 
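+ +For example (an illustrative sketch; `make_examples` and `filenames` are +hypothetical), a generator-backed source cannot be split, while an equivalent +file-backed source can: + +``` +# Not splittable: the dispatcher cannot hand out pieces of a Python generator. +dataset = tf.data.Dataset.from_generator(make_examples, output_signature=...) + +# Splittable: the dispatcher can hand out individual filenames as splits. +dataset = tf.data.Dataset.from_tensor_slices(filenames) +dataset = dataset.interleave(tf.data.TFRecordDataset) +```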
+ +If no workers are restarted during training, dynamic sharding mode will visit +every example exactly once. If workers are restarted during training, the splits +they were processing will not be fully visited. The dispatcher maintains a +cursor through the dataset's splits. Assuming fault tolerance is enabled (see +"Fault Tolerance" below), the dispatcher will store cursor state in write-ahead +logs so that the cursor can be restored in case the dispatcher is restarted +mid-training. This provides an at-most-once visitation guarantee in the presence +of server restarts. + +#### Static Sharding + +The following are static sharding policies. The semantics are similar to +`tf.data.experimental.AutoShardPolicy`. These policies require: + + * The tf.data service cluster is configured with a fixed list of workers + in DispatcherConfig. + * Each client only reads from the local tf.data service worker. + +If a worker is restarted while performing static sharding, the worker will +begin processing its shard again from the beginning. + +FILE: Shards by input files (i.e. each worker will get a fixed set of files to +process). When this option is selected, make sure that there are at least as +many files as workers. If there are fewer input files than workers, a runtime +error will be raised. + +DATA: Shards by elements produced by the dataset. Each worker will process the +whole dataset and discard the portion that is not for itself. Note that for +this mode to correctly partition the dataset elements, the dataset needs to +produce elements in a deterministic order. + +FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based +sharding on failure. + +HINT: Looks for the presence of `shard(SHARD_HINT, ...)`, which is treated as a +placeholder to replace with `shard(num_workers, worker_index)`. + +For backwards compatibility, `processing_mode` may also be set to the strings +`"parallel_epochs"` or `"distributed_epoch"`, which are respectively equivalent +to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`. + +### Coordinated Data Read + +By default, when multiple consumers read from the same job, they receive data on +a first-come, first-served basis. In some use cases, it is advantageous to +coordinate the consumers. At each step, consumers read data from the same +worker. + +For example, the tf.data service can be used to coordinate example sizes across +a cluster during synchronous training, so that during each step all replicas +train on similar-sized elements. To achieve this, define a dataset which +generates rounds of `num_consumers` consecutive similar-sized batches, then +enable coordinated reads by setting `consumer_index` and `num_consumers`. + +NOTE: To keep consumers in sync, coordinated reads require that the dataset have +infinite cardinality. You can get this by adding `.repeat()` at the end of the +dataset definition. + +### Jobs + +A tf.data service "job" refers to the process of reading from a dataset managed +by the tf.data service, using one or more data consumers. Jobs are created when +iterating over datasets that read from tf.data service. The data produced by a +job is determined by (1) the dataset associated with the job and (2) the job's +processing mode. For example, if a job is created for the dataset +`Dataset.range(5)`, and the processing mode is `ShardingPolicy.OFF`, each +tf.data worker will produce the elements `{0, 1, 2, 3, 4}` for the job, +resulting in the +job producing `5 * num_workers` elements.
If the processing mode is +`ShardingPolicy.DYNAMIC`, the job will only produce `5` elements. + +One or more consumers can consume data from a job. By default, jobs are +"anonymous", meaning that only the consumer which created the job can read from +it. To share the output of a job across multiple consumers, you can set a common +`job_name`. + +### Fault Tolerance + +By default, the tf.data dispatch server stores its state in-memory, making it a +single point of failure during training. To avoid this, pass +`fault_tolerant_mode=True` when creating your `DispatchServer`. Dispatcher +fault tolerance requires `work_dir` to be configured and accessible from the +dispatcher both before and after restart (e.g. a GCS path). With fault tolerant +mode enabled, the dispatcher will journal its state to the work directory so +that no state is lost when the dispatcher is restarted. + +WorkerServers may be freely restarted, added, or removed during training. At +startup, workers will register with the dispatcher and begin processing all +outstanding jobs from the beginning. + +### Usage with tf.distribute + +tf.distribute is the TensorFlow API for distributed training. There are +several ways to use tf.data with tf.distribute: +`strategy.experimental_distribute_dataset`, +`strategy.distribute_datasets_from_function`, and (for PSStrategy) +`coordinator.create_per_worker_dataset`. The following sections give code +examples for each. + +In general, we recommend using +`tf.data.experimental.service.{register_dataset,from_dataset_id}` over +`tf.data.experimental.service.distribute` for two reasons: + +- The dataset only needs to be constructed and optimized once, instead of once + per worker. This can significantly reduce startup time, because the current + `experimental_distribute_dataset` and `distribute_datasets_from_function` + implementations create and optimize worker datasets sequentially. +- If a dataset depends on lookup tables or variables that are only present on + one host, the dataset needs to be registered from that host. Typically this + only happens when resources are placed on the chief or worker 0. Registering + the dataset from the chief will avoid issues with depending on remote + resources. + +#### strategy.experimental_distribute_dataset + +Nothing special is required when using +`strategy.experimental_distribute_dataset`; just apply `register_dataset` and +`from_dataset_id` as above, making sure to specify a `job_name` so that all +workers consume from the same tf.data service job. + +``` +dataset = ... # Define your dataset here. +dataset_id = tf.data.experimental.service.register_dataset( + service=FLAGS.tf_data_service_address, + dataset=dataset) +dataset = tf.data.experimental.service.from_dataset_id( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + dataset_id=dataset_id, + job_name="shared_job") + +dataset = strategy.experimental_distribute_dataset(dataset) +``` + +#### strategy.distribute_datasets_from_function + +First, make sure the dataset produced by the `dataset_fn` does not depend on the +`input_context` for the training worker on which it is run. Instead of each +worker building its own (sharded) dataset, one worker should register an +unsharded dataset, and the remaining workers should consume data from that +dataset.
+ +``` +dataset = dataset_fn() +dataset_id = tf.data.experimental.service.register_dataset( + service=FLAGS.tf_data_service_address, + dataset=dataset) + +def new_dataset_fn(input_context): + del input_context + return tf.data.experimental.service.from_dataset_id( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + dataset_id=dataset_id, + job_name="shared_job") + +dataset = strategy.distribute_datasets_from_function(new_dataset_fn) +``` + +#### coordinator.create_per_worker_dataset + +`create_per_worker_dataset` works the same as +`distribute_datasets_from_function`. + +``` +dataset = dataset_fn() +dataset_id = tf.data.experimental.service.register_dataset( + service=FLAGS.tf_data_service_address, + dataset=dataset) + +def new_dataset_fn(input_context): + del input_context + return tf.data.experimental.service.from_dataset_id( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + dataset_id=dataset_id, + job_name="shared_job") + +dataset = coordinator.create_per_worker_dataset(new_dataset_fn) +``` + +### Sharing tf.data service with concurrent trainers + +If you run multiple trainers concurrently using the same training data, it could +save resources to cache the data in one tf.data service cluster and share the +cluster with the trainers. For example, if you use Vizier to tune +hyperparameters, the Vizier jobs can run concurrently and share one tf.data +service cluster. + +To enable this feature, each trainer needs to generate a unique trainer ID, and +you pass the trainer ID to `tf.data.experimental.service.distribute`. Once a job +has consumed data, the data remains in the cache and is re-used by jobs with +different `trainer_id`s. Requests with the same `trainer_id` do not re-use data. +For example: + +``` +dataset = expensive_computation() +dataset = dataset.apply(tf.data.experimental.service.distribute( + processing_mode=tf.data.experimental.service.ShardingPolicy.OFF, + service=FLAGS.tf_data_service_address, + job_name="job", + cross_trainer_cache=data_service_ops.CrossTrainerCache( + trainer_id=trainer_id()))) +``` + +tf.data service uses a sliding-window cache to store shared data. When one +trainer consumes data, the data remains in the cache. When other trainers need +data, they can get data from the cache instead of repeating the expensive +computation. The cache has a bounded size, so some workers may not read the full +dataset. To ensure all the trainers get sufficient training data, we require the +input dataset to be infinite. This can be achieved, for example, by repeating +the dataset and performing random augmentation on the training instances. + +## Limitations + +- Python-based data processing: Datasets which use Python-based data processing + (e.g. `tf.py_function`, `tf.numpy_function`, or + `tf.data.Dataset.from_generator`) are currently not supported. +- Non-Serializable Resources: Datasets may only depend on TF resources that + support serialization. Serialization is currently supported for lookup + tables and variables. If your dataset depends on a TF resource that cannot be + serialized, please file a Github issue. +- Remote Resources: If a dataset depends on a resource, the dataset must be + registered from the same process that created the resource (e.g. the "chief" + job of ParameterServerStrategy). 
+""" + +from tensorflow.python.data.experimental.ops.data_service_ops import distribute +from tensorflow.python.data.experimental.ops.data_service_ops import from_dataset_id +from tensorflow.python.data.experimental.ops.data_service_ops import register_dataset +from tensorflow.python.data.experimental.ops.data_service_ops import ShardingPolicy +from tensorflow.python.data.experimental.service.server_lib import DispatcherConfig +from tensorflow.python.data.experimental.service.server_lib import DispatchServer +from tensorflow.python.data.experimental.service.server_lib import WorkerConfig +from tensorflow.python.data.experimental.service.server_lib import WorkerServer diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3677de1d8c7e2e88bf1c630ddc2032b0123e4990 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/server_lib.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/server_lib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec786064e40dd24c401147a13e4e238c3c570d49 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/server_lib.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_server_lib.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_server_lib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d39c6ac8225da83eee388dcf303f75430c6526a9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_server_lib.pyi @@ -0,0 +1,54 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +class DispatchGrpcDataServer: + def __init__(self, *args, **kwargs) -> None: ... + def bound_port(self) -> int: ... + def join(self) -> None: ... + def num_workers(self) -> int: ... + def snapshot_streams(self, *args, **kwargs) -> Any: ... + def start(self) -> Status: ... + def stop(self) -> None: ... + +class SnapshotStreamInfoWrapper: + def __init__(self) -> None: ... + @property + def index(self) -> int: ... + @property + def state(self) -> int: ... + +class SnapshotTaskProgressWrapper: + def __init__(self) -> None: ... + @property + def completed(self) -> bool: ... 
+ @property + def snapshot_task_base_path(self) -> bytes: ... + @property + def snapshot_task_stream_index(self) -> int: ... + +class WorkerGrpcDataServer: + def __init__(self, *args, **kwargs) -> None: ... + def bound_port(self) -> int: ... + def join(self) -> None: ... + def num_tasks(self) -> int: ... + def snapshot_task_progresses(self, *args, **kwargs) -> Any: ... + def start(self) -> Status: ... + def stop(self) -> None: ... + +def TF_DATA_GetDataServiceMetadataByID(*args, **kwargs) -> Any: ... +def TF_DATA_NewDispatchServer(arg0: str) -> DispatchGrpcDataServer: ... +def TF_DATA_NewWorkerServer(arg0: str) -> WorkerGrpcDataServer: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_snapshot_utils.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_snapshot_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c59b0f0b04b7544926e0a13e10bb9e0b5a77055f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_snapshot_utils.pyi @@ -0,0 +1,19 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TF_DATA_CommittedChunksDirectory(arg0: str) -> str: ... +def TF_DATA_SnapshotDoneFilePath(arg0: str) -> str: ... +def TF_DATA_SnapshotErrorFilePath(arg0: str) -> str: ... +def TF_DATA_SnapshotMetadataFilePath(arg0: str) -> str: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_utils.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e88ec5672773ef21ab32ee0c5768395c555127f5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_utils.pyi @@ -0,0 +1,17 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def TF_DATA_DefaultProtocol() -> str: ... +def TF_DATA_DisableCompressionAtRegistrationTime() -> bool: ... 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/server_lib.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/server_lib.py new file mode 100644 index 0000000000000000000000000000000000000000..f00c1ad631427a7feb37e54813584084898f5f44 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/server_lib.py @@ -0,0 +1,478 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A Python interface for creating dataset servers.""" + +import collections +from typing import Iterable + +# pylint: disable=invalid-import-order,g-bad-import-order, unused-import +from tensorflow.core.protobuf import service_config_pb2 +from tensorflow.python import pywrap_tensorflow +from tensorflow.python.data.experimental.service import _pywrap_server_lib +from tensorflow.python.data.experimental.service import _pywrap_utils +from tensorflow.python.util.tf_export import tf_export + + +def _get_time_or_placeholder(value) -> int: + """Modifies time-based config values to account for special behaviors.""" + + # Servers interpret time values of 0 to mean "choose a reasonable + # default". However, the Python API uses `None` for this, and allows 0 as a + # normal value. To account for this, if a user explicitly configures the + # interval/timeout to 0, we interpret it to mean "a very small number", and + # replace it with 1. + if value == 0: + return 1 + # `None` indicates that the user wants to leave the behavior to the runtime. + if value is None: + return 0 + return value + + +@tf_export("data.experimental.service.DispatcherConfig") +class DispatcherConfig( + collections.namedtuple( + "DispatcherConfig", + [ + "port", + "protocol", + "work_dir", + "fault_tolerant_mode", + "worker_addresses", + "job_gc_check_interval_ms", + "job_gc_timeout_ms", + "worker_timeout_ms", + "worker_max_concurrent_snapshots", + ], + ) +): + """Configuration class for tf.data service dispatchers. + + Fields: + port: Specifies the port to bind to. A value of 0 indicates that the server + may bind to any available port. + protocol: The protocol to use for communicating with the tf.data service, + e.g. "grpc". + work_dir: A directory to store dispatcher state in. This + argument is required for the dispatcher to be able to recover from + restarts. + fault_tolerant_mode: Whether the dispatcher should write its state to a + journal so that it can recover from restarts. Dispatcher state, including + registered datasets and created jobs, is synchronously written to the + journal before responding to RPCs. If `True`, `work_dir` must also be + specified. + worker_addresses: If the job uses auto-sharding, it needs to specify a fixed + list of worker addresses that will register with the dispatcher. 
The + worker addresses should be in the format `"host"` or `"host:port"`, where + `"port"` is an integer, named port, or `%port%` to match any port. + job_gc_check_interval_ms: How often the dispatcher should scan through to + delete old and unused jobs, in milliseconds. If not set, the runtime will + select a reasonable default. A higher value will reduce load on the + dispatcher, while a lower value will reduce the time it takes for the + dispatcher to garbage collect expired jobs. + job_gc_timeout_ms: How long a job needs to be unused before it becomes a + candidate for garbage collection, in milliseconds. A value of -1 indicates + that jobs should never be garbage collected. If not set, the runtime will + select a reasonable default. A higher value will cause jobs to stay around + longer with no consumers. This is useful if there is a large gap in + time between when consumers read from the job. A lower value will reduce + the time it takes to reclaim the resources from expired jobs. + worker_timeout_ms: How long to wait for a worker to heartbeat before + considering it missing. If not set, the runtime will select a reasonable + default. + worker_max_concurrent_snapshots: The maximum number of snapshots a worker + can concurrently process. + """ + + def __new__( + cls, + port=0, + protocol=None, + work_dir=None, + fault_tolerant_mode=False, + worker_addresses=None, + job_gc_check_interval_ms=None, + job_gc_timeout_ms=None, + worker_timeout_ms=None, + worker_max_concurrent_snapshots=0, + ): + if protocol is None: + protocol = _pywrap_utils.TF_DATA_DefaultProtocol() + job_gc_check_interval_ms = _get_time_or_placeholder( + job_gc_check_interval_ms) + job_gc_timeout_ms = _get_time_or_placeholder(job_gc_timeout_ms) + return super().__new__( + cls, + port, + protocol, + work_dir, + fault_tolerant_mode, + worker_addresses, + job_gc_check_interval_ms, + job_gc_timeout_ms, + worker_timeout_ms, + worker_max_concurrent_snapshots, + ) + + +@tf_export("data.experimental.service.DispatchServer", v1=[]) +class DispatchServer: + """An in-process tf.data service dispatch server. + + A `tf.data.experimental.service.DispatchServer` coordinates a cluster of + `tf.data.experimental.service.WorkerServer`s. When the workers start, they + register themselves with the dispatcher. + + >>> dispatcher = tf.data.experimental.service.DispatchServer() + >>> dispatcher_address = dispatcher.target.split("://")[1] + >>> worker = tf.data.experimental.service.WorkerServer( + ... tf.data.experimental.service.WorkerConfig( + ... dispatcher_address=dispatcher_address)) + >>> dataset = tf.data.Dataset.range(10) + >>> dataset = dataset.apply(tf.data.experimental.service.distribute( + ... processing_mode="parallel_epochs", service=dispatcher.target)) + >>> print(list(dataset.as_numpy_iterator())) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + When starting a dedicated tf.data dispatch process, use join() to block + after starting up the server until the server terminates. + + ``` + dispatcher = tf.data.experimental.service.DispatchServer( + tf.data.experimental.service.DispatcherConfig(port=5050)) + dispatcher.join() + ``` + + Call stop() to gracefully terminate the dispatcher. The server automatically + stops when all references to it have been deleted.
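+ + For example, to manage the server lifecycle explicitly (a minimal sketch + using only the constructor argument and methods documented on this class): + + ``` + dispatcher = tf.data.experimental.service.DispatchServer(start=False) + dispatcher.start() # Begin serving; the port is chosen automatically. + # ... connect workers and clients via dispatcher.target ... + dispatcher.stop() # Gracefully shut down the server. + ```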
+ + To start a `DispatchServer` in fault-tolerant mode, set `work_dir` and + `fault_tolerant_mode` like below: + + ``` + dispatcher = tf.data.experimental.service.DispatchServer( + tf.data.experimental.service.DispatcherConfig( + port=5050, + work_dir="gs://my-bucket/dispatcher/work_dir", + fault_tolerant_mode=True)) + ``` + """ + + def __init__(self, config=None, start=True): + """Creates a new dispatch server. + + Args: + config: (Optional.) A `tf.data.experimental.service.DispatcherConfig` + configuration. If `None`, the dispatcher will use default + configuration values. + start: (Optional.) Boolean, indicating whether to start the server after + creating it. Defaults to True. + """ + config = config or DispatcherConfig() + if config.fault_tolerant_mode and not config.work_dir: + raise ValueError( + "Cannot enable fault tolerant mode without configuring a work dir. " + "Make sure to set `work_dir` in the `config` object passed to " + "`DispatchServer`.") + self._config = config + if isinstance(config, service_config_pb2.DispatcherConfig): + config_proto = config + else: + config_proto = service_config_pb2.DispatcherConfig( + port=config.port, + protocol=config.protocol, + work_dir=config.work_dir, + fault_tolerant_mode=config.fault_tolerant_mode, + worker_addresses=config.worker_addresses, + job_gc_check_interval_ms=config.job_gc_check_interval_ms, + job_gc_timeout_ms=config.job_gc_timeout_ms, + worker_timeout_ms=config.worker_timeout_ms, + worker_max_concurrent_snapshots=config.worker_max_concurrent_snapshots + ) + self._server = _pywrap_server_lib.TF_DATA_NewDispatchServer( + config_proto.SerializeToString()) + if start: + self._server.start() + + def start(self): + """Starts this server. + + >>> dispatcher = tf.data.experimental.service.DispatchServer(start=False) + >>> dispatcher.start() + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + starting the server. + """ + self._server.start() + + def join(self) -> None: + """Blocks until the server has shut down. + + This is useful when starting a dedicated dispatch process. + + ``` + dispatcher = tf.data.experimental.service.DispatchServer( + tf.data.experimental.service.DispatcherConfig(port=5050)) + dispatcher.join() + ``` + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + joining the server. + """ + self._server.join() + + def stop(self) -> None: + """Stops the server. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + stopping the server. + """ + self._stop() + + @property + def target(self) -> str: + """Returns a target that can be used to connect to the server. + + >>> dispatcher = tf.data.experimental.service.DispatchServer() + >>> dataset = tf.data.Dataset.range(10) + >>> dataset = dataset.apply(tf.data.experimental.service.distribute( + ... processing_mode="parallel_epochs", service=dispatcher.target)) + + The returned string will be in the form protocol://address, e.g. + "grpc://localhost:5050". + """ + return "{0}://localhost:{1}".format(self._config.protocol, + self._server.bound_port()) + + def _stop(self) -> None: + """Stops the server. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + stopping the server. + """ + self._server.stop() + + def __del__(self) -> None: + self._stop() + + @property + def _address(self) -> str: + """Returns the address of the server. + + The returned string will be in the form address:port, e.g. "localhost:1000".
+ """ + return "localhost:{0}".format(self._server.bound_port()) + + def _num_workers(self) -> int: + """Returns the number of workers registered with the dispatcher.""" + return self._server.num_workers() + + def _snapshot_streams( + self, path) -> Iterable[_pywrap_server_lib.SnapshotStreamInfoWrapper]: + """Returns information about all the streams for a snapshot.""" + return self._server.snapshot_streams(path) + + +@tf_export("data.experimental.service.WorkerConfig") +class WorkerConfig( + collections.namedtuple("WorkerConfig", [ + "dispatcher_address", "worker_address", "port", "protocol", + "heartbeat_interval_ms", "dispatcher_timeout_ms", + "data_transfer_protocol", "data_transfer_address" + ])): + """Configuration class for tf.data service dispatchers. + + Fields: + dispatcher_address: Specifies the address of the dispatcher. + worker_address: Specifies the address of the worker server. This address is + passed to the dispatcher so that the dispatcher can tell clients how to + connect to this worker. + port: Specifies the port to bind to. A value of 0 indicates that the worker + can bind to any available port. + protocol: A string indicating the protocol to be used by the worker to + connect to the dispatcher. E.g. "grpc". + heartbeat_interval_ms: How often the worker should heartbeat to the + dispatcher, in milliseconds. If not set, the runtime will select a + reasonable default. A higher value will reduce the load on the dispatcher, + while a lower value will reduce the time it takes to reclaim resources + from finished jobs. + dispatcher_timeout_ms: How long, in milliseconds, to retry requests to the + dispatcher before giving up and reporting an error. Defaults to 1 hour. + data_transfer_protocol: A string indicating the protocol to be used by the + worker to transfer data to the client. E.g. "grpc". + data_transfer_address: A string indicating the data transfer address of the + worker server. + """ + + def __new__(cls, + dispatcher_address, + worker_address=None, + port=0, + protocol=None, + heartbeat_interval_ms=None, + dispatcher_timeout_ms=None, + data_transfer_protocol=None, + data_transfer_address=None): + if worker_address is None: + worker_address = "localhost:%port%" + if protocol is None: + protocol = _pywrap_utils.TF_DATA_DefaultProtocol() + if data_transfer_address is None: + data_transfer_address = "localhost:%port%" + heartbeat_interval_ms = _get_time_or_placeholder(heartbeat_interval_ms) + dispatcher_timeout_ms = _get_time_or_placeholder(dispatcher_timeout_ms) + + return super(WorkerConfig, + cls).__new__(cls, dispatcher_address, worker_address, port, + protocol, heartbeat_interval_ms, + dispatcher_timeout_ms, data_transfer_protocol, + data_transfer_address) + + +@tf_export("data.experimental.service.WorkerServer", v1=[]) +class WorkerServer: + """An in-process tf.data service worker server. + + A `tf.data.experimental.service.WorkerServer` performs `tf.data.Dataset` + processing for user-defined datasets, and provides the resulting elements over + RPC. A worker is associated with a single + `tf.data.experimental.service.DispatchServer`. + + >>> dispatcher = tf.data.experimental.service.DispatchServer() + >>> dispatcher_address = dispatcher.target.split("://")[1] + >>> worker = tf.data.experimental.service.WorkerServer( + ... tf.data.experimental.service.WorkerConfig( + ... dispatcher_address=dispatcher_address)) + >>> dataset = tf.data.Dataset.range(10) + >>> dataset = dataset.apply(tf.data.experimental.service.distribute( + ... 
processing_mode="parallel_epochs", service=dispatcher.target)) + >>> print(list(dataset.as_numpy_iterator())) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + When starting a dedicated tf.data worker process, use join() to block + after starting up the worker until the worker terminates. + + ``` + worker = tf.data.experimental.service.WorkerServer( + tf.data.experimental.service.WorkerConfig( + dispatcher_address="localhost:5050", port=5051)) + worker.join() + ``` + + Call stop() to gracefully terminate the worker. The worker automatically stops + when all references to it have been deleted. + """ + + def __init__(self, config, start=True): + """Creates a new worker server. + + Args: + config: A `tf.data.experimental.service.WorkerConfig` configuration. + start: (Optional.) Boolean, indicating whether to start the server after + creating it. Defaults to True. + """ + if config.dispatcher_address is None: + raise ValueError( + "Must specify a `dispatcher_address` in the `config` passed " + "to `WorkerServer`.") + if isinstance(config, service_config_pb2.WorkerConfig): + config_proto = config + else: + config_proto = service_config_pb2.WorkerConfig( + dispatcher_address=config.dispatcher_address, + worker_address=config.worker_address, + port=config.port, + protocol=config.protocol, + heartbeat_interval_ms=config.heartbeat_interval_ms, + dispatcher_timeout_ms=config.dispatcher_timeout_ms, + data_transfer_protocol=config.data_transfer_protocol, + data_transfer_address=config.data_transfer_address) + self._server = _pywrap_server_lib.TF_DATA_NewWorkerServer( + config_proto.SerializeToString()) + if start: + self._server.start() + + def start(self) -> None: + """Starts this server. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + starting the server. + """ + self._server.start() + + def join(self) -> None: + """Blocks until the server has shut down. + + This is useful when starting a dedicated worker process. + + ``` + worker_server = tf.data.experimental.service.WorkerServer( + tf.data.experimental.service.WorkerConfig( + dispatcher_address="localhost:5050", port=5051)) + worker_server.join() + ``` + + This method currently blocks forever. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + joining the server. + """ + self._server.join() + + def stop(self) -> None: + """Stops the server. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + stopping the server. + """ + self._stop() + + def _stop(self) -> None: + """Stops the server. + + Raises: + tf.errors.OpError: Or one of its subclasses if an error occurs while + stopping the server. + """ + self._server.stop() + + def __del__(self) -> None: + self._stop() + + @property + def _address(self) -> str: + """Returns the address of the server. + + The returned string will be in the form address:port, e.g. "localhost:1000". + """ + return "localhost:{0}".format(self._server.bound_port()) + + def _num_tasks(self) -> int: + """Returns the number of tasks currently being executed on the worker.""" + return self._server.num_tasks() + + def _snapshot_task_progresses( + self) -> Iterable[_pywrap_server_lib.SnapshotTaskProgressWrapper]: + """Returns the progresses of the snapshot tasks currently being executed. + + Returns: + An `Iterable[common_pb2.SnapshotTaskProgress]`.
+ """ + return self._server.snapshot_task_progresses() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/checkpoint_test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/checkpoint_test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9167c78a79c8b0d951ee678af51e38a5e623493c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/checkpoint_test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61e850fa74791c181890582c1e8985c16535307b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/tf_record_test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/tf_record_test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..233270f66cceeb57f1e4c4c440e0573354e34060 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/__pycache__/tf_record_test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/checkpoint_test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/checkpoint_test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..4d42070b6a63e6581a416b232ba966a942748e49 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/checkpoint_test_base.py @@ -0,0 +1,660 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base test class for checkpointing datasets.""" + +import os + +import numpy as np +from tensorflow.python.checkpoint import checkpoint as tracking_util +from tensorflow.python.checkpoint import checkpoint_management +from tensorflow.python.checkpoint import checkpoint_options +from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import options as options_lib +from tensorflow.python.eager import context +from tensorflow.python.framework import combinations +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor +from tensorflow.python.ops import lookup_ops +from tensorflow.python.ops import variables +from tensorflow.python.ops.ragged import ragged_tensor_value +from tensorflow.python.platform import gfile +from tensorflow.python.platform import test +from tensorflow.python.training import saver as saver_lib +from tensorflow.python.util import nest + + +def remove_variants(get_next_op): + # TODO(b/72408568): Remove this once session.run can get variant tensors. + """Remove variants from a nest structure, so sess.run will execute.""" + + def _remove_variant(x): + if isinstance(x, tensor.Tensor) and x.dtype == dtypes.variant: + return () + else: + return x + + return nest.map_structure(_remove_variant, get_next_op) + + +def default_test_combinations(): + """Returns the default test combinations for testing checkpointing.""" + + def disable_optimizations(ds_fn): + options = options_lib.Options() + options.experimental_optimization.apply_default_optimizations = False + + def ds_fn_no_opt(): + return ds_fn().with_options(options) + + return ds_fn_no_opt + + def verify_unused_iterator( + obj, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + obj.verify_unused_iterator( + ds_fn=disable_optimizations(ds_fn=ds_fn), + num_outputs=num_outputs, + sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + verify_unused_iterator_combination = combinations.combine( + verify_fn=combinations.NamedObject( + "verify_unused_iterator", verify_unused_iterator)) + + def verify_fully_used_iterator( + obj, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + obj.verify_fully_used_iterator( + ds_fn=disable_optimizations(ds_fn=ds_fn), + num_outputs=num_outputs, + sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + verify_fully_used_iterator_combination = combinations.combine( + verify_fn=combinations.NamedObject( + "verify_fully_used_iterator", verify_fully_used_iterator)) + + def verify_exhausted_iterator( + obj, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + obj.verify_exhausted_iterator( + ds_fn=disable_optimizations(ds_fn=ds_fn), + num_outputs=num_outputs, + sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + verify_exhausted_iterator_combination = combinations.combine( + verify_fn=combinations.NamedObject( + "verify_exhausted_iterator", verify_exhausted_iterator)) + + def verify_multiple_breaks( + obj, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + obj.verify_multiple_breaks( + ds_fn=disable_optimizations(ds_fn=ds_fn), + num_outputs=num_outputs, + 
sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + verify_multiple_breaks_combination = combinations.combine( + verify_fn=combinations.NamedObject( + "verify_multiple_breaks", verify_multiple_breaks)) + + def verify_reset_restored_iterator( + obj, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + obj.verify_reset_restored_iterator( + ds_fn=disable_optimizations(ds_fn=ds_fn), + num_outputs=num_outputs, + sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + verify_reset_restored_iterator_combination = combinations.combine( + verify_fn=combinations.NamedObject( + "verify_reset_restored_iterator", verify_reset_restored_iterator)) + + return (verify_unused_iterator_combination + + verify_fully_used_iterator_combination + + verify_exhausted_iterator_combination + + verify_multiple_breaks_combination + + verify_reset_restored_iterator_combination) + + +# TODO(b/72657739): Remove sparse_tensor argument, which is to test the +# (deprecated) saveable `SparseTensorSliceDataset`, once the API +# `from_sparse_tensor_slices()` and related tests are deleted. +class CheckpointTestBase(test.TestCase): + """Base test class for checkpointing datasets.""" + + def tearDown(self): + self._delete_ckpt() + super(CheckpointTestBase, self).tearDown() + + def verify_unused_iterator(self, + ds_fn, + num_outputs, + sparse_tensors=False, + verify_exhausted=True, + assert_items_equal=False): + """Verifies that saving and restoring an unused iterator works. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + sparse_tensors: Whether dataset is built from SparseTensor(s). + verify_exhausted: Whether to verify that the iterator has been exhausted + after producing `num_outputs` elements. + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. + """ + self.verify_run_with_breaks( + ds_fn, [0], + num_outputs, + sparse_tensors=sparse_tensors, + verify_exhausted=verify_exhausted, + assert_items_equal=assert_items_equal) + + def verify_fully_used_iterator(self, + ds_fn, + num_outputs, + sparse_tensors=False, + assert_items_equal=False): + """Verifies that saving and restoring a fully used iterator works. + + Note that this only checks saving and restoring an iterator from which + `num_outputs` items have been produced but does not check for an + exhausted iterator, i.e., one from which an OutOfRange error has been + returned. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + sparse_tensors: Whether dataset is built from SparseTensor(s). + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if test fails. + """ + self.verify_run_with_breaks( + ds_fn, + [num_outputs], + num_outputs, + sparse_tensors=sparse_tensors, + assert_items_equal=assert_items_equal) + + def verify_exhausted_iterator( + self, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False): + """Verifies that saving and restoring an exhausted iterator works. + + An exhausted iterator is one which has returned an OutOfRange error. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + sparse_tensors: Whether dataset is built from SparseTensor(s). 
+ assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. + """ + del assert_items_equal + self.gen_outputs( + ds_fn, [], + num_outputs, + verify_exhausted=True, + sparse_tensors=sparse_tensors) + actual = self.gen_outputs( + ds_fn, [], + 0, + ckpt_saved=True, + verify_exhausted=True, + sparse_tensors=sparse_tensors) + self.assertLen(actual, 0) + + def verify_multiple_breaks(self, + ds_fn, + num_outputs, + num_breaks=10, + sparse_tensors=False, + verify_exhausted=True, + assert_items_equal=False): + """Attempts to save/restore at multiple break points. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + num_breaks: The number of break points. These are uniformly spread in [0, + num_outputs] both inclusive. + sparse_tensors: Whether dataset is built from SparseTensor(s). + verify_exhausted: Whether to verify that the iterator has been exhausted + after producing `num_outputs` elements. + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. + """ + self.verify_run_with_breaks( + ds_fn, + self.gen_break_points(num_outputs, num_breaks), + num_outputs, + sparse_tensors=sparse_tensors, + verify_exhausted=verify_exhausted, + assert_items_equal=assert_items_equal) + + def verify_reset_restored_iterator(self, + ds_fn, + num_outputs, + break_point=None, + sparse_tensors=False, + verify_exhausted=True, + assert_items_equal=False): + """Attempts to re-initialize a restored iterator. + + This is useful when restoring a training checkpoint during validation. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + break_point: Break point. Optional. Defaults to num_outputs/2. + sparse_tensors: Whether dataset is built from SparseTensor(s). + verify_exhausted: Whether to verify that the iterator has been exhausted + after producing `num_outputs` elements. + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. + """ + if context.executing_eagerly(): + self.skipTest("Eager mode iteration does not support re-initialization.") + + break_point = num_outputs // 2 if not break_point else break_point + + # Collect ground truth containing all outputs. + expected = self.gen_outputs( + ds_fn, [], + num_outputs, + sparse_tensors=sparse_tensors, + verify_exhausted=verify_exhausted) + + # Skip some items and save checkpoint. + self.gen_outputs( + ds_fn, [], + break_point, + sparse_tensors=sparse_tensors, + verify_exhausted=False) + + actual = [] + # Restore from checkpoint and then run init_op.
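+ # `_initialize` runs both before and after `_restore` below: the first call + # gives the restore valid variable and table state to write into, and the + # second call re-runs `init_op` so the restored iterator is reset to the + # beginning, which is the behavior this test verifies.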
+ with ops.Graph().as_default() as g: + saver = self._import_meta_graph() + init_op, get_next_op = self._get_iterator_ops_from_collection( + ds_fn, sparse_tensors=sparse_tensors) + get_next_op = remove_variants(get_next_op) + with self.session(graph=g) as sess: + self._initialize(init_op, sess) + self._restore(saver, sess) + self._initialize(init_op, sess) + for _ in range(num_outputs): + actual.append(sess.run(get_next_op)) + if verify_exhausted: + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + self.match(expected, actual, assert_items_equal=assert_items_equal) + + def verify_error_on_save(self, + ds_fn, + num_outputs, + error, + break_point=None, + sparse_tensors=False, + assert_items_equal=False): + """Attempts to save a non-saveable iterator. + + Args: + ds_fn: 0-argument function that returns a Dataset. + num_outputs: Total number of outputs expected from this Dataset. + error: Declared error when trying to save iterator. + break_point: Break point. Optional. Defaults to num_outputs/2. + sparse_tensors: Whether dataset is built from SparseTensor(s). + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. + """ + del assert_items_equal + break_point = num_outputs // 2 if not break_point else break_point + if context.executing_eagerly(): + iterator = iter(ds_fn()) + ckpt = tracking_util.Checkpoint(iterator=iterator) + for _ in range(break_point): + next(iterator) + with self.assertRaises(error): + ckpt.save(self._ckpt_path()) + else: + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph( + ds_fn, sparse_tensors=sparse_tensors) + get_next_op = remove_variants(get_next_op) + with self.session(graph=g) as sess: + self._initialize(init_op, sess) + for _ in range(break_point): + sess.run(get_next_op) + with self.assertRaises(error): + self._save(sess, saver) + + def verify_run_with_breaks(self, + ds_fn, + break_points, + num_outputs, + sparse_tensors=False, + verify_exhausted=True, + assert_items_equal=False): + """Verifies that ds_fn() produces the same outputs with and without breaks. + + 1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it + *without* stopping at break points. + 2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it + with stopping at break points. + + Deep matches outputs from 1 and 2. + + Args: + ds_fn: 0-argument function that returns a Dataset. + break_points: A list of integers. For each `break_point` in + `break_points`, we produce outputs till `break_point` number of items + have been produced and then checkpoint the state. The current graph and + session are destroyed and a new graph and session are used to produce + outputs till next checkpoint or till `num_outputs` elements have been + produced. `break_point` must be <= `num_outputs`. + num_outputs: Total number of outputs expected from this Dataset. + sparse_tensors: Whether dataset is built from SparseTensor(s). + verify_exhausted: Whether to verify that the iterator has been exhausted + after producing `num_outputs` elements. + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if any test fails. 
+ """ + expected = self.gen_outputs( + ds_fn, [], + num_outputs, + sparse_tensors=sparse_tensors, + verify_exhausted=verify_exhausted) + + actual = self.gen_outputs( + ds_fn, + break_points, + num_outputs, + sparse_tensors=sparse_tensors, + verify_exhausted=verify_exhausted) + + self.match(expected, actual, assert_items_equal=assert_items_equal) + + def gen_outputs(self, + ds_fn, + break_points, + num_outputs, + ckpt_saved=False, + sparse_tensors=False, + verify_exhausted=True, + save_checkpoint_at_end=True): + """Generates elements from input dataset while stopping at break points. + + Produces `num_outputs` outputs and saves the state of the iterator in the + Saver checkpoint. + + Args: + ds_fn: 0-argument function that returns the dataset. + break_points: A list of integers. For each `break_point` in + `break_points`, we produce outputs till `break_point` number of items + have been produced and then checkpoint the state. The current graph and + session are destroyed and a new graph and session are used to produce + outputs till next checkpoint or till `num_outputs` elements have been + produced. `break_point` must be <= `num_outputs`. + num_outputs: The total number of outputs to produce from the iterator. + ckpt_saved: Whether a checkpoint already exists. + sparse_tensors: Whether dataset is built from SparseTensor(s). + verify_exhausted: Whether to verify that the iterator has been exhausted + after producing `num_outputs` elements. + save_checkpoint_at_end: Whether to save a checkpoint after producing all + outputs. If False, checkpoints are saved each break point but not at the + end. Note that checkpoints overwrite each other so there is always only + a single checkpoint available. Defaults to True. + + Returns: + A list of `num_outputs` items. + """ + outputs = [] + + if context.executing_eagerly(): + for i in range(len(break_points) + 1): + iterator = iter(ds_fn()) + ckpt = tracking_util.Checkpoint(iterator=iterator) + if ckpt_saved: + ckpt_path = self._latest_ckpt() + ckpt.restore(ckpt_path) + start = break_points[i - 1] if i > 0 else 0 + end = break_points[i] if i < len(break_points) else num_outputs + num_iters = end - start + for _ in range(num_iters): + outputs.append(self.evaluate(next(iterator))) + if i == len(break_points) and verify_exhausted: + with self.assertRaises(StopIteration): + next(iterator) + if save_checkpoint_at_end or i < len(break_points): + # TODO(b/275117275): Verify if TF2 async checkpoint works. 
+ ckpt_options = checkpoint_options.CheckpointOptions() + ckpt_options.experimental_enable_async_checkpoint = False + ckpt_options.enable_async = False + ckpt_path = ckpt.save(self._ckpt_path(), options=ckpt_options) + ckpt_saved = True + else: + def get_ops(): + if ckpt_saved: + saver = self._import_meta_graph() + init_op, get_next_op = self._get_iterator_ops_from_collection( + ds_fn, sparse_tensors=sparse_tensors) + else: + init_op, get_next_op, saver = self._build_graph( + ds_fn, sparse_tensors=sparse_tensors) + return init_op, get_next_op, saver + + for i in range(len(break_points) + 1): + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = get_ops() + get_next_op = remove_variants(get_next_op) + with self.session(graph=g) as sess: + if ckpt_saved: + self._initialize(init_op, sess) + self._restore(saver, sess) + else: + self._initialize(init_op, sess) + start = break_points[i - 1] if i > 0 else 0 + end = break_points[i] if i < len(break_points) else num_outputs + num_iters = end - start + for _ in range(num_iters): + outputs.append(sess.run(get_next_op)) + if i == len(break_points) and verify_exhausted: + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + if save_checkpoint_at_end or i < len(break_points): + self._save(sess, saver) + ckpt_saved = True + + return outputs + + def match(self, expected, actual, assert_items_equal=False): + """Matches nested structures. + + Recursively matches shape and values of `expected` and `actual`. + Handles scalars, numpy arrays and other python sequence containers + e.g. list, dict, as well as SparseTensorValue and RaggedTensorValue. + + Args: + expected: Nested structure 1. + actual: Nested structure 2. + assert_items_equal: Tests the output has the expected elements regardless + of order. + + Raises: + AssertionError if matching fails. 
+ """ + if isinstance(expected, np.ndarray): + expected = expected.tolist() + if isinstance(actual, np.ndarray): + actual = actual.tolist() + self.assertEqual(type(expected), type(actual)) + + if nest.is_nested(expected): + self.assertEqual(len(expected), len(actual)) + if isinstance(expected, dict): + for key1, key2 in zip(sorted(expected), sorted(actual)): + self.assertEqual(key1, key2) + self.match(expected[key1], actual[key2]) + elif assert_items_equal: + for item1, item2 in zip(sorted(expected), sorted(actual)): + self.match(item1, item2) + else: + for item1, item2 in zip(expected, actual): + self.match(item1, item2) + elif isinstance(expected, sparse_tensor.SparseTensorValue): + self.match((expected.indices, expected.values, expected.dense_shape), + (actual.indices, actual.values, actual.dense_shape)) + elif isinstance(expected, ragged_tensor_value.RaggedTensorValue): + self.match((expected.values, expected.row_splits), + (actual.values, actual.row_splits)) + else: + self.assertEqual(expected, actual) + + def does_not_match(self, expected, actual): + with self.assertRaises(AssertionError): + self.match(expected, actual) + + def gen_break_points(self, num_outputs, num_samples=10): + """Generates `num_samples` unique break points in [0, num_outputs].""" + return np.unique(np.linspace(0, num_outputs, num_samples, dtype=int)) + + def _build_graph(self, ds_fn, sparse_tensors=False): + dataset = ds_fn() + iterator = dataset_ops.make_initializable_iterator(dataset) + external_state_policy = dataset.options().experimental_external_state_policy + saveable = contrib_iterator_ops.make_saveable_from_iterator( + iterator, external_state_policy=external_state_policy) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + if sparse_tensors: + get_next = sparse_tensor.SparseTensor(*iterator.get_next()) + else: + get_next = iterator.get_next() + self._add_iterator_ops_to_collection(init_op, get_next, ds_fn, + sparse_tensors) + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + def _add_iterator_ops_to_collection(self, + init_op, + get_next, + ds_fn, + sparse_tensors=False): + ops.add_to_collection("iterator_ops", init_op) + # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections + # do not support tuples we flatten the tensors and restore the shape in + # `_get_iterator_ops_from_collection`. + if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`. + ops.add_to_collection("iterator_ops", get_next.indices) + ops.add_to_collection("iterator_ops", get_next.values) + ops.add_to_collection("iterator_ops", get_next.dense_shape) + return + + get_next_list = nest.flatten(get_next) + for i, output_class in enumerate( + nest.flatten(self._get_output_classes(ds_fn))): + if output_class is sparse_tensor.SparseTensor: + ops.add_to_collection("iterator_ops", get_next_list[i].indices) + ops.add_to_collection("iterator_ops", get_next_list[i].values) + ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape) + else: + ops.add_to_collection("iterator_ops", get_next_list[i]) + + def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False): + all_ops = ops.get_collection("iterator_ops") + if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`. 
+ init_op, indices, values, dense_shape = all_ops + return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape) + get_next_list = [] + i = 1 + for output_class in nest.flatten(self._get_output_classes(ds_fn)): + if output_class is sparse_tensor.SparseTensor: + indices, values, dense_shape = all_ops[i:i + 3] + i += 3 + get_next_list.append( + sparse_tensor.SparseTensor(indices, values, dense_shape)) + else: + get_next_list.append(all_ops[i]) + i += 1 + return all_ops[0], nest.pack_sequence_as( + self._get_output_types(ds_fn), get_next_list) + + def _get_output_types(self, ds_fn): + assert not context.executing_eagerly() + with ops.Graph().as_default(): + return dataset_ops.get_legacy_output_types(ds_fn()) + + def _get_output_shapes(self, ds_fn): + assert not context.executing_eagerly() + with ops.Graph().as_default(): + return dataset_ops.get_legacy_output_shapes(ds_fn()) + + def _get_output_classes(self, ds_fn): + assert not context.executing_eagerly() + with ops.Graph().as_default(): + return dataset_ops.get_legacy_output_classes(ds_fn()) + + def _ckpt_path(self): + return os.path.join(self.get_temp_dir(), "iterator") + + def _latest_ckpt(self): + return checkpoint_management.latest_checkpoint(self.get_temp_dir()) + + def _save(self, sess, saver): + saver.save(sess, self._ckpt_path()) + + def _restore(self, saver, sess): + sess.run(lookup_ops.tables_initializer()) + saver.restore(sess, self._latest_ckpt()) + + def _initialize(self, init_op, sess): + sess.run(variables.global_variables_initializer()) + sess.run(lookup_ops.tables_initializer()) + sess.run(init_op) + + def _import_meta_graph(self): + meta_file_path = self._ckpt_path() + ".meta" + return saver_lib.import_meta_graph(meta_file_path) + + def _delete_ckpt(self): + # Remove all checkpoint files. `map` is lazy in Python 3, so iterate + # explicitly rather than calling `map(gfile.Remove, files)`. + prefix = self._ckpt_path() + pattern = prefix + "*" + files = gfile.Glob(pattern) + for f in files: + gfile.Remove(f) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..cb46d47c77009e1e2db7d06450883413b79d2a3b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/test_base.py @@ -0,0 +1,448 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================== +"""Test utilities for tf.data functionality.""" +import os +import random +import re + +from tensorflow.python.data.experimental.ops import lookup_ops as data_lookup_ops +from tensorflow.python.data.experimental.ops import random_access +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import test_mode +from tensorflow.python.data.util import nest +from tensorflow.python.data.util import structure +from tensorflow.python.eager import context +from tensorflow.python.framework import combinations +from tensorflow.python.framework import config +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_dataset_ops +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.ops import lookup_ops +from tensorflow.python.ops import tensor_array_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.platform import test + + +def default_test_combinations(): + """Returns the default test combinations for tf.data tests.""" + return combinations.combine(tf_api_version=[1, 2], mode=["eager", "graph"]) + + +def eager_only_combinations(): + """Returns the default test combinations for eager mode only tf.data tests.""" + return combinations.combine(tf_api_version=[1, 2], mode="eager") + + +def graph_only_combinations(): + """Returns the default test combinations for graph mode only tf.data tests.""" + return combinations.combine(tf_api_version=[1, 2], mode="graph") + + +def v1_only_combinations(): + """Returns the default test combinations for v1 only tf.data tests.""" + return combinations.combine(tf_api_version=1, mode=["eager", "graph"]) + + +def v2_only_combinations(): + """Returns the default test combinations for v2 only tf.data tests.""" + return combinations.combine(tf_api_version=2, mode=["eager", "graph"]) + + +def v2_eager_only_combinations(): + """Returns the default test combinations for v2 eager only tf.data tests.""" + return combinations.combine(tf_api_version=2, mode="eager") + + +class DatasetTestBase(test.TestCase): + """Base class for dataset tests.""" + + def setUp(self): + super().setUp() + test_mode.toggle_test_mode(True) + + def assert_op_cancelled(self, op): + with self.assertRaises(errors.CancelledError): + self.evaluate(op) + + def assertValuesEqual(self, expected, actual): + """Asserts that two values are equal.""" + if isinstance(expected, dict): + self.assertItemsEqual(list(expected.keys()), list(actual.keys())) + for k in expected.keys(): + self.assertValuesEqual(expected[k], actual[k]) + elif sparse_tensor.is_sparse(expected): + self.assertAllEqual(expected.indices, actual.indices) + self.assertAllEqual(expected.values, actual.values) + self.assertAllEqual(expected.dense_shape, actual.dense_shape) + else: + self.assertAllEqual(expected, actual) + + def getNext(self, dataset, requires_initialization=False, shared_name=None): + """Returns a callable that returns the next element of the dataset. + + Example use: + ```python + # In both graph and eager modes + dataset = ... + get_next = self.getNext(dataset) + result = self.evaluate(get_next()) + ``` + + Args: + dataset: A dataset whose elements will be returned. 
+ requires_initialization: Indicates that when the test is executed in graph + mode, it should use an initializable iterator to iterate through the + dataset (e.g. when it contains stateful nodes). Defaults to False. + shared_name: (Optional.) If non-empty, the returned iterator will be + shared under the given name across multiple sessions that share the same + devices (e.g. when using a remote server). + Returns: + A callable that returns the next element of `dataset`. Any `TensorArray` + objects `dataset` outputs are stacked. + """ + def ta_wrapper(gn): + def _wrapper(): + r = gn() + if isinstance(r, tensor_array_ops.TensorArray): + return r.stack() + else: + return r + return _wrapper + + # Create an anonymous iterator if we are in eager-mode or are graph inside + # of a tf.function. + if context.executing_eagerly() or ops.inside_function(): + iterator = iter(dataset) + return ta_wrapper(iterator._next_internal) # pylint: disable=protected-access + else: + if requires_initialization: + iterator = dataset_ops.make_initializable_iterator(dataset, shared_name) + self.evaluate(iterator.initializer) + else: + iterator = dataset_ops.make_one_shot_iterator(dataset) + get_next = iterator.get_next() + return ta_wrapper(lambda: get_next) + + def _compareOutputToExpected(self, result_values, expected_values, + assert_items_equal): + if assert_items_equal: + # TODO(shivaniagrawal): add support for nested elements containing sparse + # tensors when needed. + self.assertItemsEqual(result_values, expected_values) + return + for i in range(len(result_values)): + nest.assert_same_structure(result_values[i], expected_values[i]) + for result_value, expected_value in zip( + nest.flatten(result_values[i]), nest.flatten(expected_values[i])): + self.assertValuesEqual(expected_value, result_value) + + def getDatasetOutput(self, dataset, requires_initialization=False): + get_next = self.getNext( + dataset, requires_initialization=requires_initialization) + return self.getIteratorOutput(get_next) + + def getIteratorOutput(self, get_next): + """Evaluates `get_next` until end of input, returning the results.""" + results = [] + while True: + try: + results.append(self.evaluate(get_next())) + except errors.OutOfRangeError: + break + return results + + def assertDatasetProduces(self, + dataset, + expected_output=None, + expected_shapes=None, + expected_error=None, + requires_initialization=False, + num_test_iterations=1, + assert_items_equal=False, + expected_error_iter=1): + """Asserts that a dataset produces the expected output / error. + + Args: + dataset: A dataset to check for the expected output / error. + expected_output: A list of elements that the dataset is expected to + produce. + expected_shapes: A list of TensorShapes which is expected to match + output_shapes of dataset. + expected_error: A tuple `(type, predicate)` identifying the expected error + `dataset` should raise. The `type` should match the expected exception + type, while `predicate` should either be 1) a unary function that inputs + the raised exception and returns a boolean indicator of success or 2) a + regular expression that is expected to match the error message + partially. + requires_initialization: Indicates that when the test is executed in graph + mode, it should use an initializable iterator to iterate through the + dataset (e.g. when it contains stateful nodes). Defaults to False. + num_test_iterations: Number of times `dataset` will be iterated. Defaults + to 1. 
+ assert_items_equal: Tests expected_output has (only) the same elements + regardless of order. + expected_error_iter: How many times to iterate before expecting an error, + if an error is expected. + """ + self.assertTrue( + expected_error is not None or expected_output is not None, + "Exactly one of expected_output or expected error should be provided.") + if expected_error: + self.assertTrue( + expected_output is None, + "Exactly one of expected_output or expected error should be provided." + ) + with self.assertRaisesWithPredicateMatch(expected_error[0], + expected_error[1]): + get_next = self.getNext( + dataset, requires_initialization=requires_initialization) + for _ in range(expected_error_iter): + self.evaluate(get_next()) + return + if expected_shapes: + self.assertEqual(expected_shapes, + dataset_ops.get_legacy_output_shapes(dataset)) + self.assertGreater(num_test_iterations, 0) + for _ in range(num_test_iterations): + get_next = self.getNext( + dataset, requires_initialization=requires_initialization) + result = [] + for _ in range(len(expected_output)): + try: + result.append(self.evaluate(get_next())) + except errors.OutOfRangeError: + raise AssertionError( + "Dataset ended early, producing %d elements out of %d. " + "Dataset output: %s" % + (len(result), len(expected_output), str(result))) + self._compareOutputToExpected(result, expected_output, assert_items_equal) + with self.assertRaises(errors.OutOfRangeError): + self.evaluate(get_next()) + with self.assertRaises(errors.OutOfRangeError): + self.evaluate(get_next()) + + def assertDatasetsEqual(self, dataset1, dataset2): + """Checks that datasets are equal. Supports both graph and eager mode.""" + self.assertTrue( + structure.are_compatible( + dataset_ops.get_structure(dataset1), + dataset_ops.get_structure(dataset2))) + + flattened_types = nest.flatten( + dataset_ops.get_legacy_output_types(dataset1)) + + next1 = self.getNext(dataset1) + next2 = self.getNext(dataset2) + + while True: + try: + op1 = self.evaluate(next1()) + except errors.OutOfRangeError: + with self.assertRaises(errors.OutOfRangeError): + self.evaluate(next2()) + break + op2 = self.evaluate(next2()) + + op1 = nest.flatten(op1) + op2 = nest.flatten(op2) + assert len(op1) == len(op2) + for i in range(len(op1)): + if sparse_tensor.is_sparse(op1[i]) or ragged_tensor.is_ragged(op1[i]): + self.assertValuesEqual(op1[i], op2[i]) + elif flattened_types[i] == dtypes.string: + self.assertAllEqual(op1[i], op2[i]) + else: + self.assertAllClose(op1[i], op2[i]) + + def assertDatasetsRaiseSameError(self, + dataset1, + dataset2, + exception_class, + replacements=None): + """Checks that datasets raise the same error on the first get_next call.""" + if replacements is None: + replacements = [] + next1 = self.getNext(dataset1) + next2 = self.getNext(dataset2) + try: + self.evaluate(next1()) + raise ValueError( + "Expected dataset to raise an error of type %s, but it did not." % + repr(exception_class)) + except exception_class as e: + expected_message = e.message + for old, new, count in replacements: + expected_message = expected_message.replace(old, new, count) + # Check that the first segment of the error messages is the same.
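+ # `replacements` is a list of (old, new, count) triples applied via + # str.replace to the first dataset's message, letting callers normalize + # expected differences (e.g. op names) before matching the second + # dataset's error against it.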
with self.assertRaisesRegex(exception_class, + re.escape(expected_message)): + self.evaluate(next2()) + + def structuredDataset(self, dataset_structure, shape=None, + dtype=dtypes.int64): + """Returns a singleton dataset with the given structure.""" + if shape is None: + shape = [] + if dataset_structure is None: + return dataset_ops.Dataset.from_tensors( + array_ops.zeros(shape, dtype=dtype)) + else: + return dataset_ops.Dataset.zip( + tuple([ + self.structuredDataset(substructure, shape, dtype) + for substructure in dataset_structure + ])) + + def verifyRandomAccess(self, dataset, expected): + self.verifyRandomAccessInfiniteCardinality(dataset, expected) + with self.assertRaises(errors.OutOfRangeError): + self.evaluate(random_access.at(dataset, index=len(expected))) + + def verifyRandomAccessInfiniteCardinality(self, dataset, expected): + """Tests randomly accessing elements of a dataset.""" + # Tests accessing the elements in a shuffled order with repeats. + len_expected = len(expected) + indices = list(range(len_expected)) * 2 + random.shuffle(indices) + for i in indices: + self.assertAllEqual(expected[i], + self.evaluate(random_access.at(dataset, i))) + + # Tests accessing the elements in order. + indices = sorted(set(indices)) + for i in indices: + self.assertAllEqual(expected[i], + self.evaluate(random_access.at(dataset, i))) + + def textFileInitializer(self, vals): + file = os.path.join(self.get_temp_dir(), "text_file_initializer") + with open(file, "w") as f: + f.write("\n".join(str(v) for v in vals) + "\n") + return lookup_ops.TextFileInitializer(file, dtypes.int64, + lookup_ops.TextFileIndex.LINE_NUMBER, + dtypes.int64, + lookup_ops.TextFileIndex.WHOLE_LINE) + + def keyValueTensorInitializer(self, vals): + keys_tensor = constant_op.constant( + list(range(len(vals))), dtype=dtypes.int64) + vals_tensor = constant_op.constant(vals) + return lookup_ops.KeyValueTensorInitializer(keys_tensor, vals_tensor) + + def datasetInitializer(self, vals): + keys = dataset_ops.Dataset.range(len(vals)) + values = dataset_ops.Dataset.from_tensor_slices(vals) + ds = dataset_ops.Dataset.zip((keys, values)) + return data_lookup_ops.DatasetInitializer(ds) + + def lookupTableInitializer(self, init_source, vals): + """Returns a lookup table initializer for the given source and values. + + Args: + init_source: One of ["textfile", "keyvaluetensor", "dataset"], indicating + what type of initializer to use. + vals: The initializer values. The keys will be `range(len(vals))`.
+ """ + if init_source == "textfile": + return self.textFileInitializer(vals) + elif init_source == "keyvaluetensor": + return self.keyValueTensorInitializer(vals) + elif init_source == "dataset": + return self.datasetInitializer(vals) + else: + raise ValueError("Unrecognized init_source: " + init_source) + + def graphRoundTrip(self, dataset, allow_stateful=False): + """Converts a dataset to a graph and back.""" + graph = gen_dataset_ops.dataset_to_graph( + dataset._variant_tensor, allow_stateful=allow_stateful) # pylint: disable=protected-access + return dataset_ops.from_variant( + gen_experimental_dataset_ops.dataset_from_graph(graph), + dataset.element_spec) + + def structuredElement(self, element_structure, shape=None, + dtype=dtypes.int64): + """Returns an element with the given structure.""" + if shape is None: + shape = [] + if element_structure is None: + return array_ops.zeros(shape, dtype=dtype) + else: + return tuple([ + self.structuredElement(substructure, shape, dtype) + for substructure in element_structure + ]) + + def checkDeterminism(self, dataset_fn, expect_determinism, expected_elements): + """Tests whether a dataset produces its elements deterministically. + + `dataset_fn` takes a delay_ms argument, which tells it how long to delay + production of the first dataset element. This gives us a way to trigger + out-of-order production of dataset elements. + + Args: + dataset_fn: A function taking a delay_ms argument. + expect_determinism: Whether to expect deterministic ordering. + expected_elements: The elements expected to be produced by the dataset, + assuming the dataset produces elements in deterministic order. + """ + if expect_determinism: + dataset = dataset_fn(100) + actual = self.getDatasetOutput(dataset) + self.assertAllEqual(expected_elements, actual) + return + + # We consider the test a success if it succeeds under any delay_ms. The + # delay_ms needed to observe non-deterministic ordering varies across + # test machines. Usually 10 or 100 milliseconds is enough, but on slow + # machines it could take longer. + for delay_ms in [10, 100, 1000, 20000, 100000]: + dataset = dataset_fn(delay_ms) + actual = self.getDatasetOutput(dataset) + self.assertCountEqual(expected_elements, actual) + for i in range(len(actual)): + if actual[i] != expected_elements[i]: + return + self.fail("Failed to observe nondeterministic ordering") + + def configureDevicesForMultiDeviceTest(self, num_devices): + """Configures number of logical devices for multi-device tests. + + It returns a list of device names. If invoked in GPU-enabled runtime, the + last device name will be for a GPU device. Otherwise, all device names will + be for a CPU device. + + Args: + num_devices: The number of devices to configure. + + Returns: + A list of device names to use for a multi-device test. 
+ """ + cpus = config.list_physical_devices("CPU") + gpus = config.list_physical_devices("GPU") + config.set_logical_device_configuration(cpus[0], [ + context.LogicalDeviceConfiguration() for _ in range(num_devices) + ]) + devices = ["/device:CPU:" + str(i) for i in range(num_devices - 1)] + if gpus: + devices.append("/device:GPU:0") + else: + devices.append("/device:CPU:" + str(num_devices - 1)) + return devices diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/tf_record_test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/tf_record_test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd4d4afccc9039f0b166d342cb788e642ec637e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/kernel_tests/tf_record_test_base.py @@ -0,0 +1,338 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base class for testing reader datasets.""" + +import os + +from tensorflow.core.example import example_pb2 +from tensorflow.core.example import feature_pb2 +from tensorflow.python.data.experimental.ops import readers +from tensorflow.python.data.kernel_tests import test_base +from tensorflow.python.data.ops import readers as core_readers +from tensorflow.python.framework import dtypes +from tensorflow.python.lib.io import python_io +from tensorflow.python.ops import parsing_ops +from tensorflow.python.util import compat + + +class FeaturesTestBase(test_base.DatasetTestBase): + """Base class for testing TFRecord-based features.""" + + def setUp(self): + super(FeaturesTestBase, self).setUp() + self._num_files = 2 + self._num_records = 7 + self._filenames = self._createFiles() + + def make_batch_feature(self, + filenames, + num_epochs, + batch_size, + label_key=None, + reader_num_threads=1, + parser_num_threads=1, + shuffle=False, + shuffle_seed=None, + drop_final_batch=False): + self.filenames = filenames + self.num_epochs = num_epochs + self.batch_size = batch_size + + return readers.make_batched_features_dataset( + file_pattern=self.filenames, + batch_size=self.batch_size, + features={ + "file": parsing_ops.FixedLenFeature([], dtypes.int64), + "record": parsing_ops.FixedLenFeature([], dtypes.int64), + "keywords": parsing_ops.VarLenFeature(dtypes.string), + "label": parsing_ops.FixedLenFeature([], dtypes.string), + }, + label_key=label_key, + reader=core_readers.TFRecordDataset, + num_epochs=self.num_epochs, + shuffle=shuffle, + shuffle_seed=shuffle_seed, + reader_num_threads=reader_num_threads, + parser_num_threads=parser_num_threads, + drop_final_batch=drop_final_batch) + + def _record(self, f, r, l): + example = example_pb2.Example( + features=feature_pb2.Features( + feature={ + "file": + feature_pb2.Feature( + int64_list=feature_pb2.Int64List(value=[f])), + "record": + feature_pb2.Feature( + int64_list=feature_pb2.Int64List(value=[r])), 
+ "keywords": + feature_pb2.Feature( + bytes_list=feature_pb2.BytesList( + value=self._get_keywords(f, r))), + "label": + feature_pb2.Feature( + bytes_list=feature_pb2.BytesList( + value=[compat.as_bytes(l)])) + })) + return example.SerializeToString() + + def _get_keywords(self, f, r): + num_keywords = 1 + (f + r) % 2 + keywords = [] + for index in range(num_keywords): + keywords.append(compat.as_bytes("keyword%d" % index)) + return keywords + + def _sum_keywords(self, num_files): + sum_keywords = 0 + for i in range(num_files): + for j in range(self._num_records): + sum_keywords += 1 + (i + j) % 2 + return sum_keywords + + def _createFiles(self): + filenames = [] + for i in range(self._num_files): + fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i) + filenames.append(fn) + writer = python_io.TFRecordWriter(fn) + for j in range(self._num_records): + writer.write(self._record(i, j, "fake-label")) + writer.close() + return filenames + + def _run_actual_batch(self, outputs, label_key_provided=False): + if label_key_provided: + # outputs would be a tuple of (feature dict, label) + features, label = self.evaluate(outputs()) + else: + features = self.evaluate(outputs()) + label = features["label"] + file_out = features["file"] + keywords_indices = features["keywords"].indices + keywords_values = features["keywords"].values + keywords_dense_shape = features["keywords"].dense_shape + record = features["record"] + return ([ + file_out, keywords_indices, keywords_values, keywords_dense_shape, + record, label + ]) + + def _next_actual_batch(self, label_key_provided=False): + return self._run_actual_batch(self.outputs, label_key_provided) + + def _interleave(self, iterators, cycle_length): + pending_iterators = iterators + open_iterators = [] + num_open = 0 + for i in range(cycle_length): + if pending_iterators: + open_iterators.append(pending_iterators.pop(0)) + num_open += 1 + + while num_open: + for i in range(min(cycle_length, len(open_iterators))): + if open_iterators[i] is None: + continue + try: + yield next(open_iterators[i]) + except StopIteration: + if pending_iterators: + open_iterators[i] = pending_iterators.pop(0) + else: + open_iterators[i] = None + num_open -= 1 + + def _next_expected_batch(self, + file_indices, + batch_size, + num_epochs, + cycle_length=1): + + def _next_record(file_indices): + for j in file_indices: + for i in range(self._num_records): + yield j, i, compat.as_bytes("fake-label") + + def _next_record_interleaved(file_indices, cycle_length): + return self._interleave([_next_record([i]) for i in file_indices], + cycle_length) + + file_batch = [] + keywords_batch_indices = [] + keywords_batch_values = [] + keywords_batch_max_len = 0 + record_batch = [] + batch_index = 0 + label_batch = [] + for _ in range(num_epochs): + if cycle_length == 1: + next_records = _next_record(file_indices) + else: + next_records = _next_record_interleaved(file_indices, cycle_length) + for record in next_records: + f = record[0] + r = record[1] + label_batch.append(record[2]) + file_batch.append(f) + record_batch.append(r) + keywords = self._get_keywords(f, r) + keywords_batch_values.extend(keywords) + keywords_batch_indices.extend( + [[batch_index, i] for i in range(len(keywords))]) + batch_index += 1 + keywords_batch_max_len = max(keywords_batch_max_len, len(keywords)) + if len(file_batch) == batch_size: + yield [ + file_batch, keywords_batch_indices, keywords_batch_values, + [batch_size, keywords_batch_max_len], record_batch, label_batch + ] + file_batch = [] + 
keywords_batch_indices = [] + keywords_batch_values = [] + keywords_batch_max_len = 0 + record_batch = [] + batch_index = 0 + label_batch = [] + if file_batch: + yield [ + file_batch, keywords_batch_indices, keywords_batch_values, + [len(file_batch), keywords_batch_max_len], record_batch, label_batch + ] + + def _verify_records(self, + batch_size, + file_index=None, + num_epochs=1, + label_key_provided=False, + interleave_cycle_length=1): + if file_index is not None: + file_indices = [file_index] + else: + file_indices = range(self._num_files) + + for expected_batch in self._next_expected_batch( + file_indices, + batch_size, + num_epochs, + cycle_length=interleave_cycle_length): + actual_batch = self._next_actual_batch( + label_key_provided=label_key_provided) + for i in range(len(expected_batch)): + self.assertAllEqual(expected_batch[i], actual_batch[i]) + + +class TFRecordTestBase(test_base.DatasetTestBase): + """Base class for TFRecord-based tests.""" + + def setUp(self): + super(TFRecordTestBase, self).setUp() + self._num_files = 2 + self._num_records = 7 + self._filenames = self._createFiles() + + def _interleave(self, iterators, cycle_length): + pending_iterators = iterators + open_iterators = [] + num_open = 0 + for i in range(cycle_length): + if pending_iterators: + open_iterators.append(pending_iterators.pop(0)) + num_open += 1 + + while num_open: + for i in range(min(cycle_length, len(open_iterators))): + if open_iterators[i] is None: + continue + try: + yield next(open_iterators[i]) + except StopIteration: + if pending_iterators: + open_iterators[i] = pending_iterators.pop(0) + else: + open_iterators[i] = None + num_open -= 1 + + def _next_expected_batch(self, file_indices, batch_size, num_epochs, + cycle_length, drop_final_batch, use_parser_fn): + + def _next_record(file_indices): + for j in file_indices: + for i in range(self._num_records): + yield j, i + + def _next_record_interleaved(file_indices, cycle_length): + return self._interleave([_next_record([i]) for i in file_indices], + cycle_length) + + record_batch = [] + batch_index = 0 + for _ in range(num_epochs): + if cycle_length == 1: + next_records = _next_record(file_indices) + else: + next_records = _next_record_interleaved(file_indices, cycle_length) + for f, r in next_records: + record = self._record(f, r) + if use_parser_fn: + record = record[1:] + record_batch.append(record) + batch_index += 1 + if len(record_batch) == batch_size: + yield record_batch + record_batch = [] + batch_index = 0 + if record_batch and not drop_final_batch: + yield record_batch + + def _verify_records(self, outputs, batch_size, file_index, num_epochs, + interleave_cycle_length, drop_final_batch, use_parser_fn): + if file_index is not None: + if isinstance(file_index, list): + file_indices = file_index + else: + file_indices = [file_index] + else: + file_indices = range(self._num_files) + + for expected_batch in self._next_expected_batch( + file_indices, batch_size, num_epochs, interleave_cycle_length, + drop_final_batch, use_parser_fn): + actual_batch = self.evaluate(outputs()) + self.assertAllEqual(expected_batch, actual_batch) + + def _record(self, f, r): + return compat.as_bytes("Record %d of file %d" % (r, f)) + + def _createFiles(self): + filenames = [] + for i in range(self._num_files): + fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i) + filenames.append(fn) + writer = python_io.TFRecordWriter(fn) + for j in range(self._num_records): + writer.write(self._record(i, j)) + writer.close() + return filenames + + def 
_writeFile(self, name, data): + filename = os.path.join(self.get_temp_dir(), name) + writer = python_io.TFRecordWriter(filename) + for d in data: + writer.write(compat.as_bytes(str(d))) + writer.close() + return filename + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77e21f3bf7d6a92fa37e27ea73b711388af49bf9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/convert.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..086b6d24afc68e52cd8c59757ded9c875740a5c0 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/convert.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/nest.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/nest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ed28f6bc64d0c0da590f8c357c1cd7b79b79b7e Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/nest.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/options.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/options.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e92179324ffec4020bcb5846645dda43aca833c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/options.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/random_seed.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/random_seed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d8618cd74dfdfabb1826f5bf9e9d14c92cc69d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/random_seed.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/sparse.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b96889030bf067c05a13ce20f3aa47528605eb6c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/sparse.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/structure.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/structure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe9e0eb12c5eb7bcba6785ce8e745bf3b3686e78 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/structure.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/traverse.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/traverse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc07c81402e5957952dfb40f48a743d423a310fd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/__pycache__/traverse.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/convert.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..1575493644696f4108ad5a37ded08773d7767deb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/convert.py @@ -0,0 +1,67 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Helpers constructing Datasets.""" +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape + + +def optional_param_to_tensor(argument_name, + argument_value, + argument_default=0, + argument_dtype=dtypes.int64): + if argument_value is not None: + return ops.convert_to_tensor( + argument_value, dtype=argument_dtype, name=argument_name) + else: + return constant_op.constant( + argument_default, dtype=argument_dtype, name=argument_name) + + +def partial_shape_to_tensor(shape_like): + """Returns a `tf.Tensor` that represents the given shape. + + Args: + shape_like: A value that can be converted to a `tf.TensorShape` or a + `tf.Tensor`. + + Returns: + A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where + `-1` is substituted for any unknown dimensions. + """ + try: + # First attempt to convert the input to a shape, and return the + # "canonical" tensor representation, which uses `-1` in place of + # `None`. + shape_like = tensor_shape.as_shape(shape_like) + return ops.convert_to_tensor( + [dim if dim is not None else -1 for dim in shape_like.as_list()], + dtype=dtypes.int64) + except (TypeError, ValueError): + # The argument was not trivially convertible to a + # `tf.TensorShape`, so fall back on the conversion to tensor + # machinery. 
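+ # For example, partial_shape_to_tensor(tensor_shape.TensorShape([16, None])) + # takes the branch above and yields the canonical tensor [16, -1], while an + # argument that is already a tensor (e.g. a 1-D `tf.int64` tensor of dims) + # fails `as_shape` and is validated by this fallback instead.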
+ ret = ops.convert_to_tensor(shape_like, preferred_dtype=dtypes.int64) + if ret.shape.dims is not None and len(ret.shape.dims) != 1: + raise ValueError("The given shape {} must be a 1-D tensor of `tf.int64` " + "values, but the shape was {}.".format( + shape_like, ret.shape)) + if ret.dtype != dtypes.int64: + raise TypeError("The given shape {} must be a 1-D tensor of `tf.int64` " + "values, but the element type was {}.".format( + shape_like, ret.dtype.name)) + + return ret diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/nest.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/nest.py new file mode 100644 index 0000000000000000000000000000000000000000..ece64dc2cea65c0c594dd9594241772cc6af25c4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/nest.py @@ -0,0 +1,305 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""## Functions for working with arbitrarily nested sequences of elements. + +NOTE(mrry): This fork of the `tensorflow.python.util.nest` module +makes two changes: + +1. It removes support for lists as a level of nesting in nested structures. +2. It adds support for `SparseTensorValue` as an atomic element. + +The motivation for this change is twofold: + +1. It seems more natural for lists to be treated (e.g. in Dataset constructors) + as tensors, rather than lists of (lists of...) tensors. +2. This is needed because `SparseTensorValue` is implemented as a `namedtuple` + that would normally be flattened and we want to be able to create sparse + tensors from `SparseTensorValue`s similarly to creating tensors from numpy + arrays. +""" + +from tensorflow.python.util import nest_util + + +def is_nested(structure): + return nest_util.is_nested(nest_util.Modality.DATA, structure) + + +def flatten(structure): + return nest_util.flatten(nest_util.Modality.DATA, structure) + + +def assert_same_structure(nest1, nest2, check_types=True): + """Asserts that two structures are nested in the same way. + + Args: + nest1: an arbitrarily nested structure. + nest2: an arbitrarily nested structure. + check_types: if `True` (default) the types of sequences should be the same as + well. For dictionary, "type" of dictionary is considered to include its + keys. In other words, two dictionaries with different keys are considered + to have a different "type". If set to `False`, two iterables are + considered the same as long as they yield elements that have the same + structures. + + Raises: + ValueError: If the two structures do not have the same number of elements or + if the two structures are not nested in the same way. + TypeError: If the two structures differ in the type of sequence in any of + their substructures. Only possible if `check_types` is `True`.
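Because this fork routes through `nest_util.Modality.DATA`, lists are leaves rather than levels of nesting. A small sketch of the resulting behavior (illustrative values):

```python
from tensorflow.python.data.util import nest

structure = {"a": [1, 2], "b": (3, 4)}

# The tuple under "b" nests, but the list under "a" is a single atom.
print(nest.flatten(structure))  # [[1, 2], 3, 4]

# Matching nesting passes silently; a mismatch raises ValueError/TypeError.
nest.assert_same_structure({"a": 0, "b": (0, 0)}, structure)
```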
+ """ + nest_util.assert_same_structure( + nest_util.Modality.DATA, nest1, nest2, check_types + ) + + +def pack_sequence_as(structure, flat_sequence): + """Returns a given flattened sequence packed into a nest. + + If `structure` is a scalar, `flat_sequence` must be a single-element list; + in this case the return value is `flat_sequence[0]`. + + Args: + structure: tuple or list constructed of scalars and/or other tuples/lists, + or a scalar. Note: numpy arrays are considered scalars. + flat_sequence: flat sequence to pack. + + Returns: + packed: `flat_sequence` converted to have the same recursive structure as + `structure`. + + Raises: + ValueError: If nest and structure have different element counts. + """ + return nest_util.pack_sequence_as( + nest_util.Modality.DATA, structure, flat_sequence, expand_composites=False + ) + + +def map_structure(func, *structure, **check_types_dict): + """Applies `func` to each entry in `structure` and returns a new structure. + + Applies `func(x[0], x[1], ...)` where x[i] is an entry in + `structure[i]`. All structures in `structure` must have the same arity, + and the return value will contain the results in the same structure. + + Args: + func: A callable that accepts as many arguments are there are structures. + *structure: scalar, or tuple or list of constructed scalars and/or other + tuples/lists, or scalars. Note: numpy arrays are considered scalars. + **check_types_dict: only valid keyword argument is `check_types`. If set to + `True` (default) the types of iterables within the structures have to be + same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` + exception). To allow this set this argument to `False`. + + Returns: + A new structure with the same arity as `structure`, whose values correspond + to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding + location in `structure[i]`. If there are different sequence types and + `check_types` is `False` the sequence types of the first structure will be + used. + + Raises: + TypeError: If `func` is not callable or if the structures do not match + each other by depth tree. + ValueError: If no structure is provided or if the structures do not match + each other by type. + ValueError: If wrong keyword arguments are provided. + """ + return nest_util.map_structure( + nest_util.Modality.DATA, func, *structure, **check_types_dict + ) + + +def assert_shallow_structure(shallow_tree, input_tree, check_types=True): + """Asserts that `shallow_tree` is a shallow structure of `input_tree`. + + That is, this function tests if the `input_tree` structure can be created from + the `shallow_tree` structure by replacing its leaf nodes with deeper + tree structures. + + Examples: + + The following code will raise an exception: + ```python + shallow_tree = ["a", "b"] + input_tree = ["c", ["d", "e"], "f"] + assert_shallow_structure(shallow_tree, input_tree) + ``` + + The following code will not raise an exception: + ```python + shallow_tree = ["a", "b"] + input_tree = ["c", ["d", "e"]] + assert_shallow_structure(shallow_tree, input_tree) + ``` + + Args: + shallow_tree: an arbitrarily nested structure. + input_tree: an arbitrarily nested structure. + check_types: if `True` (default) the sequence types of `shallow_tree` and + `input_tree` have to be the same. + + Raises: + TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + TypeError: If the sequence types of `shallow_tree` are different from + `input_tree`. Only raised if `check_types` is `True`. 
+ ValueError: If the sequence lengths of `shallow_tree` are different from + `input_tree`. + """ + nest_util.assert_shallow_structure( + nest_util.Modality.DATA, shallow_tree, input_tree, check_types + ) + + +def flatten_up_to(shallow_tree, input_tree): + """Flattens `input_tree` up to `shallow_tree`. + + Any further depth in structure in `input_tree` is retained as elements in the + partially flattened output. + + If `shallow_tree` and `input_tree` are not sequences, this returns a + single-element list: `[input_tree]`. + + Use Case: + + Sometimes we may wish to partially flatten a nested sequence, retaining some + of the nested structure. We achieve this by specifying a shallow structure, + `shallow_tree`, that we wish to flatten up to. + + The input, `input_tree`, can be thought of as having the same structure as + `shallow_tree`, but with leaf nodes that are themselves tree structures. + + Examples: + + ```python + input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] + shallow_tree = [[True, True], [False, True]] + + flattened_input_tree = flatten_up_to(shallow_tree, input_tree) + flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) + + # Output is: + # [[2, 2], [3, 3], [4, 9], [5, 5]] + # [True, True, False, True] + ``` + + ```python + input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] + shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] + + input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) + input_tree_flattened = flatten(input_tree) + + # Output is: + # [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + # ['a', 1, 'b', 2, 'c', 3, 'd', 4] + ``` + + Non-Sequence Edge Cases: + + ```python + flatten_up_to(0, 0) # Output: [0] + flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] + flatten_up_to([0, 1, 2], 0) # Output: TypeError + flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] + ``` + + Args: + shallow_tree: a possibly pruned structure of input_tree. + input_tree: an arbitrarily nested structure or a scalar object. + Note, numpy arrays are considered scalars. + + Returns: + A Python list, the partially flattened version of `input_tree` according to + the structure of `shallow_tree`. + + Raises: + TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + TypeError: If the sequence types of `shallow_tree` are different from + `input_tree`. + ValueError: If the sequence lengths of `shallow_tree` are different from + `input_tree`. + """ + return nest_util.flatten_up_to( + nest_util.Modality.DATA, shallow_tree, input_tree + ) + + +def map_structure_up_to(shallow_tree, func, *inputs): + """Applies a function or op to a number of partially flattened inputs. + + The `inputs` are flattened up to `shallow_tree` before being mapped. + + Use Case: + + Sometimes we wish to apply a function to a partially flattened + sequence (for example when the function itself takes sequence inputs). We + achieve this by specifying a shallow structure, `shallow_tree`, that we wish to + flatten up to. + + The `inputs` can be thought of as having the same structure as + `shallow_tree`, but with leaf nodes that are themselves tree structures. + + This function, therefore, will return something with the same base structure + as `shallow_tree`.
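The helpers above compose: `flatten` and `pack_sequence_as` round-trip a structure through a flat list, and `map_structure` applies a function leaf-wise. A brief sketch (illustrative values):

```python
from tensorflow.python.data.util import nest

structure = {"features": (1, 2), "label": 3}

flat = nest.flatten(structure)  # [1, 2, 3]
scaled = nest.pack_sequence_as(structure, [x * 10 for x in flat])
print(scaled)  # {'features': (10, 20), 'label': 30}

print(nest.map_structure(lambda x, y: x + y, structure, scaled))
# {'features': (11, 22), 'label': 33}
```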
+ + Examples: + + ```python + ab_tuple = collections.namedtuple("ab_tuple", "a, b") + op_tuple = collections.namedtuple("op_tuple", "add, mul") + inp_val = ab_tuple(a=2, b=3) + inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) + out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, + inp_val, inp_ops) + + # Output is: ab_tuple(a=6, b=15) + ``` + + ```python + data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] + name_list = ['evens', ['odds', 'primes']] + out = map_structure_up_to( + name_list, + lambda name, sec: "first_{}_{}".format(len(sec), name), + name_list, data_list) + + # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] + ``` + + Args: + shallow_tree: a shallow tree, common to all the inputs. + func: callable which will be applied to each input individually. + *inputs: arbitrarily nested combination of objects that are compatible with + shallow_tree. The function `func` is applied to corresponding + partially flattened elements of each input, so the function must support + arity of `len(inputs)`. + + Raises: + TypeError: If `shallow_tree` is a sequence but `input_tree` is not. + TypeError: If the sequence types of `shallow_tree` are different from + `input_tree`. + ValueError: If the sequence lengths of `shallow_tree` are different from + `input_tree`. + + Returns: + result of repeatedly applying `func`, with same structure as + `shallow_tree`. + """ + return nest_util.map_structure_up_to( + nest_util.Modality.DATA, shallow_tree, func, *inputs + ) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/options.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/options.py new file mode 100644 index 0000000000000000000000000000000000000000..3ec1c53ff6508778f640885b403999cb7f0de224 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/options.py @@ -0,0 +1,173 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for tf.data options.""" + +import collections + +from absl import logging + + +def _internal_attr_name(name): + return "_" + name + + +class OptionsBase: + """Base class for representing a set of tf.data options. + + Attributes: + _options: Stores the option values. 
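A sketch of how `create_option` (defined further down in this file) attaches type-checked properties to an `OptionsBase` subclass; the subclass and field here are hypothetical:

```python
from tensorflow.python.data.util import options as options_lib


class MyOptions(options_lib.OptionsBase):  # hypothetical options set
  prefetch = options_lib.create_option(
      name="prefetch",
      ty=bool,
      docstring="Whether to prefetch elements.",
      default_factory=lambda: False)


opts = MyOptions()
print(opts.prefetch)  # False, supplied by default_factory on first read
opts.prefetch = True  # assigning a non-bool here would raise TypeError
```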
+ """ + + def __init__(self): + # NOTE: Cannot use `self._options` here as we override `__setattr__` + object.__setattr__(self, "_options", {}) + object.__setattr__(self, "_mutable", True) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + for name in set(self._options) | set(other._options): # pylint: disable=protected-access + if getattr(self, name) != getattr(other, name): + return False + return True + + def __ne__(self, other): + if isinstance(other, self.__class__): + return not self.__eq__(other) + else: + return NotImplemented + + def __setattr__(self, name, value): + if not self._mutable: + raise ValueError("Mutating `tf.data.Options()` returned by " + "`tf.data.Dataset.options()` has no effect. Use " + "`tf.data.Dataset.with_options(options)` to set or " + "update dataset options.") + if hasattr(self, name): + object.__setattr__(self, name, value) + else: + raise AttributeError("Cannot set the property {} on {}.".format( + name, + type(self).__name__)) + + def _set_mutable(self, mutable): + """Change the mutability property to `mutable`.""" + object.__setattr__(self, "_mutable", mutable) + + def _to_proto(self): + """Convert options to protocol buffer.""" + raise NotImplementedError("{}._to_proto()".format(type(self).__name__)) + + def _from_proto(self, pb): + """Convert protocol buffer to options.""" + raise NotImplementedError("{}._from_proto()".format(type(self).__name__)) + + +# Creates a namedtuple with three keys for optimization graph rewrites settings. +def graph_rewrites(): + return collections.namedtuple("GraphRewrites", + ["enabled", "disabled", "default"]) + + +def create_option(name, ty, docstring, default_factory=lambda: None): + """Creates a type-checked property. + + Args: + name: The name to use. + ty: The type to use. The type of the property will be validated when it + is set. + docstring: The docstring to use. + default_factory: A callable that takes no arguments and returns a default + value to use if not set. + + Returns: + A type-checked property. + """ + + def get_fn(option): + # pylint: disable=protected-access + if name not in option._options: + option._options[name] = default_factory() + return option._options.get(name) + + def set_fn(option, value): + if not isinstance(value, ty): + raise TypeError( + "Property \"{}\" must be of type {}, got: {} (type: {})".format( + name, ty, value, type(value))) + option._options[name] = value # pylint: disable=protected-access + + return property(get_fn, set_fn, None, docstring) + + +def merge_options(*options_list): + """Merges the given options, returning the result as a new options object. + + The input arguments are expected to have a matching type that derives from + `tf.data.OptionsBase` (and thus each represent a set of options). The method + outputs an object of the same type created by merging the sets of options + represented by the input arguments. + + If an option is set to different values by different options objects, the + result will match the setting of the options object that appears in the input + list last. + + If an option is an instance of `tf.data.OptionsBase` itself, then this method + is applied recursively to the set of options represented by this option. + + Args: + *options_list: options to merge + + Raises: + TypeError: if the input arguments are incompatible or not derived from + `tf.data.OptionsBase` + + Returns: + A new options object which is the result of merging the given options. 
+ """ + if len(options_list) < 1: + raise ValueError("At least one options should be provided") + result_type = type(options_list[0]) + + for options in options_list: + if not isinstance(options, result_type): + raise TypeError( + "Could not merge incompatible options of type {} and {}.".format( + type(options), result_type)) + + if not isinstance(options_list[0], OptionsBase): + raise TypeError( + "All options to be merged should inherit from `OptionsBase` but found " + "option of type {} which does not.".format(type(options_list[0]))) + + default_options = result_type() + result = result_type() + for options in options_list: + # Iterate over all set options and merge them into the result. + for name in options._options: # pylint: disable=protected-access + this = getattr(result, name) + that = getattr(options, name) + default = getattr(default_options, name) + if that == default: + continue + elif this == default: + setattr(result, name, that) + elif isinstance(this, OptionsBase): + setattr(result, name, merge_options(this, that)) + elif this != that: + logging.warning("Changing the value of option %s from %r to %r.", name, + this, that) + setattr(result, name, that) + return result diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/random_seed.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/random_seed.py new file mode 100644 index 0000000000000000000000000000000000000000..8910a8323627376659253582e2a39af00734e8d2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/random_seed.py @@ -0,0 +1,54 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for generating Tensor-valued random seeds.""" + +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops + + +def get_seed(seed): + """Returns the local seeds an operation should use given an op-specific seed. + + See `random_seed.get_seed` for more details. This wrapper adds support for + the case where `seed` may be a tensor. + + Args: + seed: An integer or a `tf.int64` scalar tensor. + + Returns: + A tuple of two `tf.int64` scalar tensors that should be used for the local + seed of the calling dataset. 
+ """ + seed, seed2 = random_seed.get_seed(seed) + if seed is None: + seed = constant_op.constant(0, dtype=dtypes.int64, name="seed") + else: + seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed") + if seed2 is None: + seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2") + else: + with ops.name_scope("seed2") as scope: + seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64) + seed2 = array_ops.where_v2( + math_ops.logical_and( + math_ops.equal(seed, 0), math_ops.equal(seed2, 0)), + constant_op.constant(2**31 - 1, dtype=dtypes.int64), + seed2, + name=scope) + return seed, seed2 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/sparse.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1fc794b1ebaebab57f0e5c55007c3d47160ce3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/sparse.py @@ -0,0 +1,148 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python dataset sparse tensor utility functions.""" +from tensorflow.python.data.util import nest +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.ops import sparse_ops + + +def any_sparse(classes): + """Checks for sparse tensor. + + Args: + classes: a structure of objects that identify the dataset item classes + + Returns: + `True` if `classes` contains a sparse tensor type and `False` otherwise. + """ + return any(c is sparse_tensor.SparseTensor for c in nest.flatten(classes)) + + +def as_dense_shapes(shapes, classes): + """Converts sparse tensor shapes to their physical shapes. + + Args: + shapes: a structure of shapes to convert. + classes: a structure of objects that identify the dataset item classes + + Returns: + a structure matching the nested structure of `shapes`, containing + `tensor_shape.unknown_shape()` at positions where `classes` contains + `tf.sparse.SparseTensor` and matching contents of `shapes` otherwise + """ + ret = nest.pack_sequence_as(shapes, [ + tensor_shape.unknown_shape() if c is sparse_tensor.SparseTensor else shape + for shape, c in zip(nest.flatten(shapes), nest.flatten(classes)) + ]) + return ret + + +def as_dense_types(types, classes): + """Converts sparse tensor types to `dtypes.variant`. + + Args: + types: a structure of types to convert. 
+ classes: a structure of objects that identify the dataset item classes + + Returns: + a structure matching the nested structure of `types`, containing + `dtypes.variant` at positions where `classes` contains + `tf.sparse.SparseTensor` and matching contents of `types` otherwise + """ + ret = nest.pack_sequence_as(types, [ + dtypes.variant if c is sparse_tensor.SparseTensor else ty + for ty, c in zip(nest.flatten(types), nest.flatten(classes)) + ]) + return ret + + +def deserialize_sparse_tensors(tensors, types, shapes, classes): + """Deserializes sparse tensors. + + Args: + tensors: a structure of tensors to deserialize. + types: a structure that holds information about types of `tensors` + shapes: a structure that holds information about shapes of `tensors` + classes: a structure of objects that identify the dataset item classes + + Returns: + `tensors` with any serialized sparse tensors replaced by their deserialized + version. + """ + ret = nest.pack_sequence_as(types, [ + sparse_ops.deserialize_sparse(tensor, dtype=ty, rank=shape.ndims) + if c is sparse_tensor.SparseTensor else tensor + for (tensor, ty, shape, c) in zip( + nest.flatten(tensors), nest.flatten(types), nest.flatten(shapes), + nest.flatten(classes)) + ]) + return ret + + +def get_classes(tensors): + """Gets classes for a structure of tensors. + + Args: + tensors: the tensor structure to get classes for. + + Returns: + a structure matching the nested structure of `tensors`, containing + `tf.sparse.SparseTensor` at positions where `tensors` contains a sparse + tensor and `tf.Tensor` otherwise. + """ + return nest.pack_sequence_as(tensors, [ + sparse_tensor.SparseTensor + if isinstance(tensor, sparse_tensor.SparseTensor) else tensor_lib.Tensor + for tensor in nest.flatten(tensors) + ]) + + +def serialize_many_sparse_tensors(tensors): + """Serializes many sparse tensors into a batch. + + Args: + tensors: a tensor structure to serialize. + + Returns: + `tensors` with any sparse tensors replaced by the serialized batch. + """ + + ret = nest.pack_sequence_as(tensors, [ + sparse_ops.serialize_many_sparse(tensor, out_type=dtypes.variant) + if sparse_tensor.is_sparse(tensor) else tensor + for tensor in nest.flatten(tensors) + ]) + return ret + + +def serialize_sparse_tensors(tensors): + """Serializes sparse tensors. + + Args: + tensors: a tensor structure to serialize. + + Returns: + `tensors` with any sparse tensors replaced by their serialized version. + """ + + ret = nest.pack_sequence_as(tensors, [ + sparse_ops.serialize_sparse(tensor, out_type=dtypes.variant) + if isinstance(tensor, sparse_tensor.SparseTensor) else tensor + for tensor in nest.flatten(tensors) + ]) + return ret diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/structure.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/structure.py new file mode 100644 index 0000000000000000000000000000000000000000..52fadac4faef798e84dd166ab257ad619950328d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/structure.py @@ -0,0 +1,521 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for describing the structure of a `tf.data` type.""" +import collections +import functools +import itertools + +import wrapt + +from tensorflow.python.data.util import nest +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import none_tensor +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor as tensor_lib +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import type_spec +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.ops import tensor_array_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.types import internal +from tensorflow.python.util import deprecation +from tensorflow.python.util.compat import collections_abc +from tensorflow.python.util.nest_util import CustomNestProtocol +from tensorflow.python.util.tf_export import tf_export + + +# pylint: disable=invalid-name +@tf_export(v1=["data.experimental.TensorStructure"]) +@deprecation.deprecated(None, "Use `tf.TensorSpec` instead.") +def _TensorStructure(dtype, shape): + return tensor_lib.TensorSpec(shape, dtype) + + +@tf_export(v1=["data.experimental.SparseTensorStructure"]) +@deprecation.deprecated(None, "Use `tf.SparseTensorSpec` instead.") +def _SparseTensorStructure(dtype, shape): + return sparse_tensor.SparseTensorSpec(shape, dtype) + + +@tf_export(v1=["data.experimental.TensorArrayStructure"]) +@deprecation.deprecated(None, "Use `tf.TensorArraySpec` instead.") +def _TensorArrayStructure(dtype, element_shape, dynamic_size, infer_shape): + return tensor_array_ops.TensorArraySpec(element_shape, dtype, + dynamic_size, infer_shape) + + +@tf_export(v1=["data.experimental.RaggedTensorStructure"]) +@deprecation.deprecated(None, "Use `tf.RaggedTensorSpec` instead.") +def _RaggedTensorStructure(dtype, shape, ragged_rank): + return ragged_tensor.RaggedTensorSpec(shape, dtype, ragged_rank) +# pylint: enable=invalid-name + + +# TODO(jsimsa): Remove the special-case for `TensorArray` pass-through once +# it is a subclass of `CompositeTensor`. +def normalize_element(element, element_signature=None): + """Normalizes a nested structure of element components. + + * Components matching `SparseTensorSpec` are converted to `SparseTensor`. + * Components matching `RaggedTensorSpec` are converted to `RaggedTensor`. + * Components matching `VariableSpec` are converted to `Tensor`. + * Components matching `DatasetSpec` or `TensorArraySpec` are passed through. + * `CompositeTensor` components are passed through. + * All other components are converted to `Tensor`. + + Args: + element: A nested structure of individual components. + element_signature: (Optional.) A nested structure of `tf.DType` objects + corresponding to each component of `element`. 
If specified, it will be + used to set the exact type of output tensor when converting input + components which are not tensors themselves (e.g. numpy arrays, native + python types, etc.) + + Returns: + A nested structure of `Tensor`, `Variable`, `Dataset`, `SparseTensor`, + `RaggedTensor`, or `TensorArray` objects. + """ + normalized_components = [] + if element_signature is None: + components = nest.flatten(element) + flattened_signature = [None] * len(components) + pack_as = element + else: + flattened_signature = nest.flatten(element_signature) + components = nest.flatten_up_to(element_signature, element) + pack_as = element_signature + with ops.name_scope("normalize_element"): + for i, (t, spec) in enumerate(zip(components, flattened_signature)): + try: + if spec is None: + spec = type_spec_from_value(t, use_fallback=False) + except TypeError: + # TypeError indicates it was not possible to compute a `TypeSpec` for + # the value. As a fallback try converting the value to a tensor. + normalized_components.append( + ops.convert_to_tensor(t, name="component_%d" % i)) + else: + # To avoid a circular dependency between dataset_ops and structure, + # we check the class name instead of using `isinstance`. + if spec.__class__.__name__ == "DatasetSpec": + normalized_components.append(t) + elif isinstance(spec, sparse_tensor.SparseTensorSpec): + normalized_components.append(sparse_tensor.SparseTensor.from_value(t)) + elif isinstance(spec, ragged_tensor.RaggedTensorSpec): + normalized_components.append( + ragged_tensor.convert_to_tensor_or_ragged_tensor( + t, name="component_%d" % i)) + elif isinstance(spec, (tensor_array_ops.TensorArraySpec)): + normalized_components.append(t) + elif isinstance(spec, none_tensor.NoneTensorSpec): + normalized_components.append(none_tensor.NoneTensor()) + elif isinstance(spec, resource_variable_ops.VariableSpec): + normalized_components.append( + ops.convert_to_tensor(t, name=f"component_{i}", dtype=spec.dtype)) + elif isinstance(t, composite_tensor.CompositeTensor): + normalized_components.append(t) + else: + dtype = getattr(spec, "dtype", None) + normalized_components.append( + ops.convert_to_tensor(t, name="component_%d" % i, dtype=dtype)) + return nest.pack_sequence_as(pack_as, normalized_components) + + +def convert_legacy_structure(output_types, output_shapes, output_classes): + """Returns a `Structure` that represents the given legacy structure. + + This method provides a way to convert from the existing `Dataset` and + `Iterator` structure-related properties to a `Structure` object. A "legacy" + structure is represented by the `tf.data.Dataset.output_types`, + `tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes` + properties. + + TODO(b/110122868): Remove this function once `Structure` is used throughout + `tf.data`. + + Args: + output_types: A nested structure of `tf.DType` objects corresponding to + each component of a structured value. + output_shapes: A nested structure of `tf.TensorShape` objects + corresponding to each component a structured value. + output_classes: A nested structure of Python `type` objects corresponding + to each component of a structured value. + + Returns: + A `Structure`. + + Raises: + TypeError: If a structure cannot be built from the arguments, because one of + the component classes in `output_classes` is not supported. 
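A sketch of converting the legacy (types, shapes, classes) triple into specs, using public API names (values illustrative):

```python
import tensorflow as tf
from tensorflow.python.data.util import structure

spec = structure.convert_legacy_structure(
    output_types={"x": tf.float32, "y": tf.int64},
    output_shapes={"x": tf.TensorShape([None, 3]), "y": tf.TensorShape([])},
    output_classes={"x": tf.Tensor, "y": tf.Tensor})
print(spec["x"])  # TensorSpec(shape=(None, 3), dtype=tf.float32, name=None)
```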
+ """ + flat_types = nest.flatten(output_types) + flat_shapes = nest.flatten(output_shapes) + flat_classes = nest.flatten(output_classes) + flat_ret = [] + for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes, + flat_classes): + if isinstance(flat_class, type_spec.TypeSpec): + flat_ret.append(flat_class) + elif issubclass(flat_class, sparse_tensor.SparseTensor): + flat_ret.append(sparse_tensor.SparseTensorSpec(flat_shape, flat_type)) + elif issubclass(flat_class, tensor_lib.Tensor): + flat_ret.append(tensor_lib.TensorSpec(flat_shape, flat_type)) + elif issubclass(flat_class, tensor_array_ops.TensorArray): + # We sneaked the dynamic_size and infer_shape into the legacy shape. + flat_ret.append( + tensor_array_ops.TensorArraySpec( + flat_shape[2:], flat_type, + dynamic_size=tensor_shape.dimension_value(flat_shape[0]), + infer_shape=tensor_shape.dimension_value(flat_shape[1]))) + else: + # NOTE(mrry): Since legacy structures produced by iterators only + # comprise Tensors, SparseTensors, and nests, we do not need to + # support all structure types here. + raise TypeError( + "Could not build a structure for output class {}. Make sure any " + "component class in `output_classes` inherits from one of the " + "following classes: `tf.TypeSpec`, `tf.sparse.SparseTensor`, " + "`tf.Tensor`, `tf.TensorArray`.".format(flat_class.__name__)) + + return nest.pack_sequence_as(output_classes, flat_ret) + + +def _from_tensor_list_helper(decode_fn, element_spec, tensor_list): + """Returns an element constructed from the given spec and tensor list. + + Args: + decode_fn: Method that constructs an element component from the element spec + component and a tensor list. + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + tensor_list: A list of tensors to use for constructing the value. + + Returns: + An element constructed from the given spec and tensor list. + + Raises: + ValueError: If the number of tensors needed to construct an element for + the given spec does not match the given number of tensors. + """ + + # pylint: disable=protected-access + + flat_specs = nest.flatten(element_spec) + flat_spec_lengths = [len(spec._flat_tensor_specs) for spec in flat_specs] + if sum(flat_spec_lengths) != len(tensor_list): + raise ValueError("Expected {} tensors but got {}.".format( + sum(flat_spec_lengths), len(tensor_list))) + + i = 0 + flat_ret = [] + for (component_spec, num_flat_values) in zip(flat_specs, flat_spec_lengths): + value = tensor_list[i:i + num_flat_values] + flat_ret.append(decode_fn(component_spec, value)) + i += num_flat_values + return nest.pack_sequence_as(element_spec, flat_ret) + + +def from_compatible_tensor_list(element_spec, tensor_list): + """Returns an element constructed from the given spec and tensor list. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + tensor_list: A list of tensors to use for constructing the value. + + Returns: + An element constructed from the given spec and tensor list. + + Raises: + ValueError: If the number of tensors needed to construct an element for + the given spec does not match the given number of tensors. 
+ """ + + # pylint: disable=protected-access + # pylint: disable=g-long-lambda + return _from_tensor_list_helper( + lambda spec, value: spec._from_compatible_tensor_list(value), + element_spec, tensor_list) + + +def from_tensor_list(element_spec, tensor_list): + """Returns an element constructed from the given spec and tensor list. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + tensor_list: A list of tensors to use for constructing the value. + + Returns: + An element constructed from the given spec and tensor list. + + Raises: + ValueError: If the number of tensors needed to construct an element for + the given spec does not match the given number of tensors or the given + spec is not compatible with the tensor list. + """ + + # pylint: disable=protected-access + # pylint: disable=g-long-lambda + return _from_tensor_list_helper( + lambda spec, value: spec._from_tensor_list(value), element_spec, + tensor_list) + + +def get_flat_tensor_specs(element_spec): + """Returns a list `tf.TypeSpec`s for the element tensor representation. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + + Returns: + A list `tf.TypeSpec`s for the element tensor representation. + """ + + # pylint: disable=protected-access + return list( + itertools.chain.from_iterable( + spec._flat_tensor_specs for spec in nest.flatten(element_spec))) + + +def get_flat_tensor_shapes(element_spec): + """Returns a list `tf.TensorShapes`s for the element tensor representation. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + + Returns: + A list `tf.TensorShapes`s for the element tensor representation. + """ + return [spec.shape for spec in get_flat_tensor_specs(element_spec)] + + +def get_flat_tensor_types(element_spec): + """Returns a list `tf.DType`s for the element tensor representation. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + + Returns: + A list `tf.DType`s for the element tensor representation. + """ + return [spec.dtype for spec in get_flat_tensor_specs(element_spec)] + + +def _to_tensor_list_helper(encode_fn, element_spec, element): + """Returns a tensor list representation of the element. + + Args: + encode_fn: Method that constructs a tensor list representation from the + given element spec and element. + element_spec: A nested structure of `tf.TypeSpec` objects representing to + element type specification. + element: The element to convert to tensor list representation. + + Returns: + A tensor list representation of `element`. + + Raises: + ValueError: If `element_spec` and `element` do not have the same number of + elements or if the two structures are not nested in the same way. + TypeError: If `element_spec` and `element` differ in the type of sequence + in any of their substructures. + """ + + nest.assert_same_structure(element_spec, element) + + def reduce_fn(state, value): + spec, component = value + if isinstance(spec, internal.TensorSpec): + try: + component = ops.convert_to_tensor(component, spec.dtype) + except (TypeError, ValueError): + raise ValueError( + f"Value {component} is not convertible to a tensor with " + f"dtype {spec.dtype} and shape {spec.shape}." 
+ ) + if not component.shape.is_compatible_with(spec.shape): + raise ValueError( + f"Value {component} is not convertible to a tensor with " + f"dtype {spec.dtype} and shape {spec.shape}." + ) + return encode_fn(state, spec, component) + + return functools.reduce( + reduce_fn, zip(nest.flatten(element_spec), nest.flatten(element)), []) + + +def to_batched_tensor_list(element_spec, element): + """Returns a tensor list representation of the element. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing the + element type specification. + element: The element to convert to tensor list representation. + + Returns: + A tensor list representation of `element`. + + Raises: + ValueError: If `element_spec` and `element` do not have the same number of + elements or if the two structures are not nested in the same way or the + rank of any of the tensors in the tensor list representation is 0. + TypeError: If `element_spec` and `element` differ in the type of sequence + in any of their substructures. + """ + + # pylint: disable=protected-access + # pylint: disable=g-long-lambda + return _to_tensor_list_helper( + lambda state, spec, component: state + spec._to_batched_tensor_list( + component), element_spec, element) + + +def to_tensor_list(element_spec, element): + """Returns a tensor list representation of the element. + + Args: + element_spec: A nested structure of `tf.TypeSpec` objects representing the + element type specification. + element: The element to convert to tensor list representation. + + Returns: + A tensor list representation of `element`. + + Raises: + ValueError: If `element_spec` and `element` do not have the same number of + elements or if the two structures are not nested in the same way. + TypeError: If `element_spec` and `element` differ in the type of sequence + in any of their substructures. + """ + + # pylint: disable=protected-access + # pylint: disable=g-long-lambda + return _to_tensor_list_helper( + lambda state, spec, component: state + spec._to_tensor_list(component), + element_spec, element) + + +def are_compatible(spec1, spec2): + """Indicates whether two type specifications are compatible. + + Two type specifications are compatible if they have the same nested structure + and their individual components are pair-wise compatible. + + Args: + spec1: A `tf.TypeSpec` object to compare. + spec2: A `tf.TypeSpec` object to compare. + + Returns: + `True` if the two type specifications are compatible and `False` otherwise. + """ + + try: + nest.assert_same_structure(spec1, spec2) + except TypeError: + return False + except ValueError: + return False + + for s1, s2 in zip(nest.flatten(spec1), nest.flatten(spec2)): + if not s1.is_compatible_with(s2) or not s2.is_compatible_with(s1): + return False + return True + + +def type_spec_from_value(element, use_fallback=True): + """Creates a type specification for the given value. + + Args: + element: The element to create the type specification for. + use_fallback: Whether to fall back to converting the element to a tensor + in order to compute its `TypeSpec`. + + Returns: + A nested structure of `TypeSpec`s that represents the type specification + of `element`. + + Raises: + TypeError: If a `TypeSpec` cannot be built for `element`, because its type + is not supported.
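A sketch of spec inference, exercising the namedtuple branch handled in the body below (the container here is hypothetical):

```python
import collections
import tensorflow as tf
from tensorflow.python.data.util import structure

Point = collections.namedtuple("Point", ["x", "y"])  # hypothetical container

spec = structure.type_spec_from_value(Point(x=tf.constant(1.0), y=[1, 2, 3]))
print(spec.x)  # TensorSpec(shape=(), dtype=tf.float32, name=None)
print(spec.y)  # TensorSpec(shape=(3,), dtype=tf.int32, name=None) -- fallback path

print(structure.are_compatible(spec, spec))  # True
```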
+ """ + spec = type_spec._type_spec_from_value(element) # pylint: disable=protected-access + if spec is not None: + return spec + + if isinstance(element, collections_abc.Mapping): + # We create a shallow copy in an attempt to preserve the key order. + # + # Note that we do not guarantee that the key order is preserved, which is + # a limitation inherited from `copy()`. As a consequence, callers of + # `type_spec_from_value` should not assume that the key order of a `dict` + # in the returned nested structure matches the key order of the + # corresponding `dict` in the input value. + if isinstance(element, collections.defaultdict): + ctor = lambda items: type(element)(element.default_factory, items) + else: + ctor = type(element) + return ctor([(k, type_spec_from_value(v)) for k, v in element.items()]) + + if isinstance(element, tuple): + if hasattr(element, "_fields") and isinstance( + element._fields, collections_abc.Sequence) and all( + isinstance(f, str) for f in element._fields): + if isinstance(element, wrapt.ObjectProxy): + element_type = type(element.__wrapped__) + else: + element_type = type(element) + # `element` is a namedtuple + return element_type(*[type_spec_from_value(v) for v in element]) + # `element` is not a namedtuple + return tuple([type_spec_from_value(v) for v in element]) + + if hasattr(element.__class__, "__attrs_attrs__"): + # `element` is an `attr.s` decorated class + attrs = getattr(element.__class__, "__attrs_attrs__") + return type(element)(*[ + type_spec_from_value(getattr(element, a.name)) for a in attrs + ]) + + if isinstance(element, CustomNestProtocol): + # pylint: disable=protected-access + metadata, children = element.__tf_flatten__() + return element.__tf_unflatten__(metadata, type_spec_from_value(children)) + # pylint: enable=protected-access + + if use_fallback: + # As a fallback try converting the element to a tensor. + try: + tensor = ops.convert_to_tensor(element) + spec = type_spec_from_value(tensor) + if spec is not None: + return spec + except (ValueError, TypeError) as e: + logging.vlog( + 3, "Failed to convert %r to tensor: %s" % (type(element).__name__, e)) + + raise TypeError("Could not build a `TypeSpec` for {} with type {}".format( + element, + type(element).__name__)) + + +# TODO(b/149584798): remove legacy forwarding references +NoneTensor = none_tensor.NoneTensor +NoneTensorSpec = none_tensor.NoneTensorSpec diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/traverse.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/traverse.py new file mode 100644 index 0000000000000000000000000000000000000000..213a213a29675194dee53278b8e7b3137e61ce38 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/util/traverse.py @@ -0,0 +1,81 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Helpers to traverse the Dataset dependency structure.""" +import queue + +from tensorflow.python.framework import dtypes + + +OP_TYPES_ALLOWLIST = ["DummyIterationCounter"] +# We allowlist all ops that produce variant tensors as output. This is a bit +# of overkill but the other dataset _inputs() traversal strategies can't +# cover the case of function inputs that capture dataset variants. +TENSOR_TYPES_ALLOWLIST = [dtypes.variant] + + +def _traverse(dataset, op_filter_fn): + """Traverse a dataset graph, returning nodes matching `op_filter_fn`.""" + result = [] + bfs_q = queue.Queue() + bfs_q.put(dataset._variant_tensor.op) # pylint: disable=protected-access + visited = [] + while not bfs_q.empty(): + op = bfs_q.get() + visited.append(op) + if op_filter_fn(op): + result.append(op) + for i in op.inputs: + input_op = i.op + if input_op not in visited: + bfs_q.put(input_op) + return result + + +def obtain_capture_by_value_ops(dataset): + """Given an input dataset, finds all allowlisted ops used for construction. + + Allowlisted ops are stateful ops which are known to be safe to capture by + value. + + Args: + dataset: Dataset to find allowlisted stateful ops for. + + Returns: + A list of variant_tensor producing dataset ops used to construct this + dataset. + """ + + def capture_by_value(op): + return (op.outputs[0].dtype in TENSOR_TYPES_ALLOWLIST or + op.type in OP_TYPES_ALLOWLIST) + + return _traverse(dataset, capture_by_value) + + +def obtain_all_variant_tensor_ops(dataset): + """Given an input dataset, finds all dataset ops used for construction. + + A series of transformations would have created this dataset with each + transformation including zero or more Dataset ops, each producing a dataset + variant tensor. This method outputs all of them. + + Args: + dataset: Dataset to find variant tensors for. + + Returns: + A list of variant_tensor producing dataset ops used to construct this + dataset. 
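A sketch of the BFS these helpers perform, built in a graph context since traversal starts from `dataset._variant_tensor.op` (op type names vary across TF versions):

```python
import tensorflow as tf
from tensorflow.python.data.util import traverse

with tf.Graph().as_default():
  ds = tf.data.Dataset.range(5).map(lambda x: x + 1).batch(2)
  ops = traverse.obtain_all_variant_tensor_ops(ds)
  print(sorted(op.type for op in ops))
  # e.g. ['BatchDatasetV2', 'MapDataset', 'RangeDataset'] -- one
  # variant-producing op per transformation in the pipeline
```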
+ """ + return _traverse(dataset, lambda op: op.outputs[0].dtype == dtypes.variant) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/core/_pywrap_py_func.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/core/_pywrap_py_func.so new file mode 100644 index 0000000000000000000000000000000000000000..6df0907615bfe0f1e7560bf69d990a1c8f5b0ca1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/core/_pywrap_py_func.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db566677dc07ac4a75c54bd336efec78246b1a01616b1118d21941dcf207e86a +size 137976 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/_pywrap_record_io.so b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/_pywrap_record_io.so new file mode 100644 index 0000000000000000000000000000000000000000..65592829a34bb55e1312308ab1c51877bb4a25df --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/_pywrap_record_io.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16b293a66e219425b659bafe663d126c83eff8ad34d8a56d06dc1447df9e9b6f +size 380672 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/metrics_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/metrics_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67eaf9511db9e030c4be429ed9dc454f5578feca --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/metrics_impl.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86c72913417ccaa3786b029f310924600359b426ca8deea48a4e29f66bc78d83 +size 137125 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/pfor.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/pfor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..371b7575c515dc7b799b727d5dc4cf817e9d224f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/pfor.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e05a205a82c0f3c4e682a8514c6188e7a0658b3d8c4d11243afb14bb5c31374b +size 142202