diff --git a/.gitattributes b/.gitattributes index 841dca48e128949e243e3ec82d2b8550d5980741..2f6306070701cb6799b17356b3fb8844e0a5f70b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -881,3 +881,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/share/terminfo/m/mac b/llava_next/share/terminfo/m/mac new file mode 100644 index 0000000000000000000000000000000000000000..c2d5d2769afe2d2ea6140ab68a94e42bf71457a6 Binary files /dev/null and b/llava_next/share/terminfo/m/mac differ diff --git a/llava_next/share/terminfo/m/mach-color b/llava_next/share/terminfo/m/mach-color new file mode 100644 index 0000000000000000000000000000000000000000..aa795f0157ef05e2ad91c48c47eea3b956aca257 Binary files /dev/null and b/llava_next/share/terminfo/m/mach-color differ diff --git a/llava_next/share/terminfo/m/mai b/llava_next/share/terminfo/m/mai new file mode 100644 index 0000000000000000000000000000000000000000..2661ea9abc595c7159ff9c99d3b577d7aa54d743 Binary files /dev/null and b/llava_next/share/terminfo/m/mai differ diff --git a/llava_next/share/terminfo/m/microb b/llava_next/share/terminfo/m/microb new file mode 100644 index 0000000000000000000000000000000000000000..13025af0120ebc51dc4dac85eb37aa93ae67bd18 Binary files /dev/null and b/llava_next/share/terminfo/m/microb differ diff --git a/llava_next/share/terminfo/m/mime b/llava_next/share/terminfo/m/mime new file mode 100644 index 0000000000000000000000000000000000000000..53f5ccca3611b83b530b61254a044505db7fb361 Binary files /dev/null and b/llava_next/share/terminfo/m/mime differ diff --git a/llava_next/share/terminfo/m/mime-hb b/llava_next/share/terminfo/m/mime-hb new file mode 100644 index 0000000000000000000000000000000000000000..8f9c9f3832c01c9738bb3654db32ce329123cfa1 Binary files /dev/null and b/llava_next/share/terminfo/m/mime-hb differ diff --git a/llava_next/share/terminfo/m/mime2 b/llava_next/share/terminfo/m/mime2 new file mode 100644 index 0000000000000000000000000000000000000000..53f5ccca3611b83b530b61254a044505db7fb361 Binary files /dev/null and b/llava_next/share/terminfo/m/mime2 differ diff --git a/llava_next/share/terminfo/m/mime314 b/llava_next/share/terminfo/m/mime314 new file mode 100644 index 0000000000000000000000000000000000000000..cc6914c8e8d15bb46ae849d27b1e0444e3bbdace Binary files /dev/null and b/llava_next/share/terminfo/m/mime314 differ diff --git a/llava_next/share/terminfo/m/minitel1 b/llava_next/share/terminfo/m/minitel1 new file mode 100644 index 0000000000000000000000000000000000000000..9f9e10656abc8a75618b0d7ee04a61fa3f442fb3 Binary files /dev/null and b/llava_next/share/terminfo/m/minitel1 differ diff --git a/llava_next/share/terminfo/m/mintty b/llava_next/share/terminfo/m/mintty new file mode 100644 index 0000000000000000000000000000000000000000..c92b1a55d55400e254adc47a2978f46b9cf46a58 Binary files /dev/null and b/llava_next/share/terminfo/m/mintty differ diff --git a/llava_next/share/terminfo/m/mod24 
b/llava_next/share/terminfo/m/mod24 new file mode 100644 index 0000000000000000000000000000000000000000..82943877827a843ffbf24d989b1c99bed58e65ce Binary files /dev/null and b/llava_next/share/terminfo/m/mod24 differ diff --git a/llava_next/share/terminfo/m/morphos b/llava_next/share/terminfo/m/morphos new file mode 100644 index 0000000000000000000000000000000000000000..1485b3bdb0c3f12a1691ad0680e4e6d406c166f3 Binary files /dev/null and b/llava_next/share/terminfo/m/morphos differ diff --git a/llava_next/share/terminfo/m/mosh-256color b/llava_next/share/terminfo/m/mosh-256color new file mode 100644 index 0000000000000000000000000000000000000000..ad31e9a0b8f95eb987a9a3eb68bc54ddca749452 Binary files /dev/null and b/llava_next/share/terminfo/m/mosh-256color differ diff --git a/llava_next/share/terminfo/m/mrxvt b/llava_next/share/terminfo/m/mrxvt new file mode 100644 index 0000000000000000000000000000000000000000..52b7a42be8a9646ab15e1eac8d40839d6eb1bd8c Binary files /dev/null and b/llava_next/share/terminfo/m/mrxvt differ diff --git a/llava_next/share/terminfo/m/ms-vt100 b/llava_next/share/terminfo/m/ms-vt100 new file mode 100644 index 0000000000000000000000000000000000000000..7c53b065a57135b69afcf4958d8d8dffe2aa1020 Binary files /dev/null and b/llava_next/share/terminfo/m/ms-vt100 differ diff --git a/llava_next/share/terminfo/m/ms-vt100-color b/llava_next/share/terminfo/m/ms-vt100-color new file mode 100644 index 0000000000000000000000000000000000000000..da376774b3debfa6f01c52aacc54f110795e4b2a Binary files /dev/null and b/llava_next/share/terminfo/m/ms-vt100-color differ diff --git a/llava_next/share/terminfo/m/msk227am b/llava_next/share/terminfo/m/msk227am new file mode 100644 index 0000000000000000000000000000000000000000..d7ade8c0eb7da1549f9dd2902fc9ae7d1f5fdaa1 Binary files /dev/null and b/llava_next/share/terminfo/m/msk227am differ diff --git a/llava_next/share/terminfo/m/mskermit227 b/llava_next/share/terminfo/m/mskermit227 new file mode 100644 index 0000000000000000000000000000000000000000..c0d5706a8b986276656b86233a11d052258021ea Binary files /dev/null and b/llava_next/share/terminfo/m/mskermit227 differ diff --git a/llava_next/share/terminfo/m/mskermit227am b/llava_next/share/terminfo/m/mskermit227am new file mode 100644 index 0000000000000000000000000000000000000000..d7ade8c0eb7da1549f9dd2902fc9ae7d1f5fdaa1 Binary files /dev/null and b/llava_next/share/terminfo/m/mskermit227am differ diff --git a/llava_next/share/terminfo/m/mt70 b/llava_next/share/terminfo/m/mt70 new file mode 100644 index 0000000000000000000000000000000000000000..ffe744ee93e72001003a90eb556ed1533114c09e Binary files /dev/null and b/llava_next/share/terminfo/m/mt70 differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68f11fd71402e6703fde227d48d9c9b85ea47ed5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/compat.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..310fe55e7e0b9e936e9654abe3d0313b3d276c30 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/compat.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..25602d92e590a38fc40060d6f017185209d4c0b3 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/__init__.py @@ -0,0 +1,33 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""`tf.data.Dataset` API for input pipelines. + +See [Importing Data](https://tensorflow.org/guide/data) for an overview. +""" + +# pylint: disable=unused-import +from tensorflow.python.data import experimental +from tensorflow.python.data.ops.dataset_ops import AUTOTUNE +from tensorflow.python.data.ops.dataset_ops import Dataset +from tensorflow.python.data.ops.dataset_ops import INFINITE as INFINITE_CARDINALITY +from tensorflow.python.data.ops.dataset_ops import make_initializable_iterator +from tensorflow.python.data.ops.dataset_ops import make_one_shot_iterator +from tensorflow.python.data.ops.dataset_ops import UNKNOWN as UNKNOWN_CARDINALITY +from tensorflow.python.data.ops.iterator_ops import Iterator +from tensorflow.python.data.ops.options import Options +from tensorflow.python.data.ops.readers import FixedLengthRecordDataset +from tensorflow.python.data.ops.readers import TextLineDataset +from tensorflow.python.data.ops.readers import TFRecordDataset +# pylint: enable=unused-import diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eaaca860a318e5bd1b78fe9fb253f76d425944d7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__init__.py @@ -0,0 +1,172 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental API for building input pipelines. + +This module contains experimental `Dataset` sources and transformations that can +be used in conjunction with the `tf.data.Dataset` API. 
Note that the +`tf.data.experimental` API is not subject to the same backwards compatibility +guarantees as `tf.data`, but we will provide deprecation advice in advance of +removing existing functionality. + +See [Importing Data](https://tensorflow.org/guide/datasets) for an overview. + +@@AutoShardPolicy +@@AutotuneAlgorithm +@@AutotuneOptions +@@Counter +@@CsvDataset +@@DatasetInitializer +@@DatasetStructure +@@DistributeOptions +@@ExternalStatePolicy +@@OptimizationOptions +@@Optional +@@OptionalStructure +@@RaggedTensorStructure +@@RandomDataset +@@Reducer +@@SparseTensorStructure +@@SqlDataset +@@Structure +@@TFRecordWriter +@@TensorArrayStructure +@@TensorStructure +@@ThreadingOptions + +@@assert_cardinality +@@at +@@bucket_by_sequence_length +@@cardinality +@@choose_from_datasets +@@copy_to_device +@@dense_to_ragged_batch +@@dense_to_sparse_batch +@@distribute +@@enable_debug_mode +@@enumerate_dataset +@@from_list +@@from_variant +@@get_next_as_optional +@@get_single_element +@@get_structure +@@group_by_reducer +@@group_by_window +@@ignore_errors +@@index_table_from_dataset +@@load +@@make_batched_features_dataset +@@make_csv_dataset +@@make_saveable_from_iterator +@@map_and_batch +@@map_and_batch_with_legacy_function +@@pad_to_cardinality +@@parallel_interleave +@@parse_example_dataset +@@prefetch_to_device +@@rejection_resample +@@sample_from_datasets +@@save +@@scan +@@shuffle_and_repeat +@@snapshot +@@table_from_dataset +@@take_while +@@to_variant +@@unbatch +@@unique + +@@AUTOTUNE +@@INFINITE_CARDINALITY +@@SHARD_HINT +@@UNKNOWN_CARDINALITY +""" + +# pylint: disable=unused-import +from tensorflow.python.data.experimental import service +from tensorflow.python.data.experimental.ops.batching import dense_to_ragged_batch +from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch +from tensorflow.python.data.experimental.ops.batching import map_and_batch +from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function +from tensorflow.python.data.experimental.ops.batching import unbatch +from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality +from tensorflow.python.data.experimental.ops.cardinality import cardinality +from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY +from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY +from tensorflow.python.data.experimental.ops.counter import Counter +from tensorflow.python.data.experimental.ops.distribute import SHARD_HINT +from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset +from tensorflow.python.data.experimental.ops.error_ops import ignore_errors +from tensorflow.python.data.experimental.ops.from_list import from_list +from tensorflow.python.data.experimental.ops.get_single_element import get_single_element +from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length +from tensorflow.python.data.experimental.ops.grouping import group_by_reducer +from tensorflow.python.data.experimental.ops.grouping import group_by_window +from tensorflow.python.data.experimental.ops.grouping import Reducer +from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets +from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave +from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets +from tensorflow.python.data.experimental.ops.io import load 
+from tensorflow.python.data.experimental.ops.io import save +from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator +from tensorflow.python.data.experimental.ops.lookup_ops import DatasetInitializer +from tensorflow.python.data.experimental.ops.lookup_ops import index_table_from_dataset +from tensorflow.python.data.experimental.ops.lookup_ops import table_from_dataset +from tensorflow.python.data.experimental.ops.pad_to_cardinality import pad_to_cardinality +from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset +from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device +from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device +from tensorflow.python.data.experimental.ops.random_access import at +from tensorflow.python.data.experimental.ops.random_ops import RandomDataset +from tensorflow.python.data.experimental.ops.readers import CsvDataset +from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset +from tensorflow.python.data.experimental.ops.readers import make_csv_dataset +from tensorflow.python.data.experimental.ops.readers import SqlDataset +from tensorflow.python.data.experimental.ops.resampling import rejection_resample +from tensorflow.python.data.experimental.ops.scan_ops import scan +from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat +from tensorflow.python.data.experimental.ops.snapshot import snapshot +from tensorflow.python.data.experimental.ops.take_while_ops import take_while +from tensorflow.python.data.experimental.ops.unique import unique +from tensorflow.python.data.experimental.ops.writers import TFRecordWriter +from tensorflow.python.data.ops.dataset_ops import AUTOTUNE +from tensorflow.python.data.ops.dataset_ops import DatasetSpec as DatasetStructure +from tensorflow.python.data.ops.dataset_ops import from_variant +from tensorflow.python.data.ops.dataset_ops import get_structure +from tensorflow.python.data.ops.dataset_ops import to_variant +from tensorflow.python.data.ops.debug_mode import enable_debug_mode +from tensorflow.python.data.ops.iterator_ops import get_next_as_optional +from tensorflow.python.data.ops.optional_ops import Optional +from tensorflow.python.data.ops.optional_ops import OptionalSpec as OptionalStructure +from tensorflow.python.data.ops.options import AutoShardPolicy +from tensorflow.python.data.ops.options import AutotuneAlgorithm +from tensorflow.python.data.ops.options import AutotuneOptions +from tensorflow.python.data.ops.options import DistributeOptions +from tensorflow.python.data.ops.options import ExternalStatePolicy +from tensorflow.python.data.ops.options import OptimizationOptions +from tensorflow.python.data.ops.options import ThreadingOptions +from tensorflow.python.data.util.structure import _RaggedTensorStructure as RaggedTensorStructure +from tensorflow.python.data.util.structure import _SparseTensorStructure as SparseTensorStructure +from tensorflow.python.data.util.structure import _TensorArrayStructure as TensorArrayStructure +from tensorflow.python.data.util.structure import _TensorStructure as TensorStructure +from tensorflow.python.framework.type_spec import TypeSpec as Structure +# pylint: enable=unused-import + +from tensorflow.python.util.all_util import remove_undocumented + +_allowed_symbols = [ + "service", +] + +remove_undocumented(__name__, _allowed_symbols) diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b19976fc1dd8e58c6779ea5bf4965628ac07c75 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13f051055bca99ad5929ef4d6c0b3f198d5c1ed9 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/multi_process_cluster.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/multi_process_cluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b47751c04ce960af073bf67cd68053e8679dc8f6 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/multi_process_cluster.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/test_base.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8617c9674b90324cf8623e71c55d9faa0ad064b5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/test_base.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..e70586188b50dbca6cbd1a411fe02df5d3eca404 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py @@ -0,0 +1,165 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""tf.data service test-cluster with local and remote workers.""" + +import tempfile + +from tensorflow.core.protobuf import data_service_pb2 +from tensorflow.core.protobuf import service_config_pb2 +from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base +from tensorflow.python.data.experimental.service import server_lib +from tensorflow.python.distribute import multi_process_lib +from tensorflow.python.framework import test_util +from tensorflow.python.platform import googletest + +_WORKER_SHUTDOWN_QUIET_PERIOD_MS = 100 + + +# pylint: disable=protected-access +class _RemoteWorkerProcess(multi_process_lib.Process): + """Runs a worker server in a new process to simulate a remote worker.""" + + def __init__(self, dispatcher_address, port, worker_tags, pipe_writer): + super(_RemoteWorkerProcess, self).__init__() + self._dispatcher_address = dispatcher_address + self._port = port + self._worker_tags = worker_tags + self._pipe_writer = pipe_writer + + def run(self): + self.start_worker() + + def start_worker(self): + self._worker = data_service_test_base.TestWorker( + self._dispatcher_address, + _WORKER_SHUTDOWN_QUIET_PERIOD_MS, + port=self._port, + worker_tags=self._worker_tags) + self._worker.start() + self._pipe_writer.send(self._worker.worker_address()) + self._worker.join() + + +class MultiProcessCluster: + """tf.data service cluster with local and remote workers. + + Represents a cluster with a dispatcher, `num_local_workers` local workers, and + `num_remote_workers` remote workers. Remote workers run in separate processes. + This is useful to test reading from local in-process workers. 
For example: + + ``` + cluster = multi_process_cluster.MultiProcessCluster( + num_local_workers=1, num_remote_workers=3) + num_elements = 10 + dataset = self.make_distributed_range_dataset( + num_elements, cluster, target_workers="LOCAL") + self.assertDatasetProduces(dataset, list(range(num_elements))) + ``` + """ + + def __init__(self, + num_local_workers, + num_remote_workers, + worker_tags=None, + worker_addresses=None, + deployment_mode=data_service_pb2.DEPLOYMENT_MODE_COLOCATED): + self._work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir()) + self._deployment_mode = deployment_mode + self._start_dispatcher(worker_addresses) + self._start_local_workers(num_local_workers, worker_tags) + self._start_remote_workers(num_remote_workers, worker_tags) + + def _start_dispatcher(self, worker_addresses, port=0): + if port == 0: + port = test_util.pick_unused_port() + self._dispatcher = server_lib.DispatchServer( + service_config_pb2.DispatcherConfig( + port=port, + protocol="grpc", + work_dir=self._work_dir, + fault_tolerant_mode=True, + worker_addresses=worker_addresses, + deployment_mode=self._deployment_mode), + start=True) + + def _start_local_workers(self, num_workers, worker_tags=None): + self._local_workers = [] + for _ in range(num_workers): + self.start_local_worker(worker_tags) + + def _start_remote_workers(self, num_workers, worker_tags=None): + # List of (worker address, remote worker process) tuples. + self._remote_workers = [] + for _ in range(num_workers): + self.start_remote_worker(worker_tags) + + def start_local_worker(self, worker_tags=None): + worker = data_service_test_base.TestWorker( + self.dispatcher_address(), + _WORKER_SHUTDOWN_QUIET_PERIOD_MS, + port=test_util.pick_unused_port(), + worker_tags=worker_tags) + worker.start() + self._local_workers.append(worker) + + def start_remote_worker(self, worker_tags=None): + """Runs a tf.data service worker in a remote process.""" + + pipe_reader, pipe_writer = multi_process_lib.multiprocessing.Pipe( + duplex=False) + worker_process = _RemoteWorkerProcess( + self.dispatcher_address(), + port=test_util.pick_unused_port(), + worker_tags=worker_tags, + pipe_writer=pipe_writer) + worker_process.start() + worker_address = pipe_reader.recv() + self._remote_workers.append((worker_address, worker_process)) + + def restart_dispatcher(self): + port = int(self.dispatcher_address().split(":")[1]) + self._dispatcher._stop() + self._start_dispatcher( + worker_addresses=(self.local_worker_addresses() + + self.remote_worker_addresses()), + port=port) + + def restart_local_workers(self): + for worker in self._local_workers: + worker.restart() + + def dispatcher_address(self): + return self._dispatcher._address + + def local_worker_addresses(self): + return [worker.worker_address() for worker in self._local_workers] + + def remote_worker_addresses(self): + return [worker_address for (worker_address, _) in self._remote_workers] + + def _stop(self): + for worker in self._local_workers: + worker.stop() + for (_, worker_process) in self._remote_workers: + worker_process.kill() + self._dispatcher._stop() + + def __del__(self): + self._stop() + + +def test_main(): + """Main function to be called within `__main__` of a test file.""" + multi_process_lib.test_main() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/test_base.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/test_base.py new file mode 100644 index 
0000000000000000000000000000000000000000..a05b864d90f00c69760a7f012f536382f9ce593e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/test_base.py @@ -0,0 +1,456 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test base for tf.data service tests.""" + +import os +import shutil +import tempfile + +from tensorflow.core.protobuf import service_config_pb2 +from tensorflow.python.data.experimental.ops import data_service_ops +from tensorflow.python.data.experimental.service import server_lib +from tensorflow.python.data.kernel_tests import test_base +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import combinations +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import googletest + +# This will be resolved to a tmp directory by `start_dispatch_server`. +TMP_WORK_DIR = "tmp_work_dir_placeholder" +# `""` indicates not to use a work directory. +NO_WORK_DIR = "" +# We use a faster than normal heartbeat interval so that tests run faster. 
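+# The values below are deliberately aggressive: workers heartbeat to the
+# dispatcher every TEST_HEARTBEAT_INTERVAL_MS, and the dispatcher treats a
+# worker as missing once TEST_WORKER_TIMEOUT_MS elapses without a heartbeat
+# (see `worker_timeout_ms` in `TestCluster` below).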
+TEST_HEARTBEAT_INTERVAL_MS = 100 +TEST_DISPATCHER_TIMEOUT_MS = 5000 +TEST_WORKER_TIMEOUT_MS = 200 +TEST_JOB_GC_CHECK_INTERNAL_MS = 1000 +TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES = 16 << 10 # 16 KB +PROTOCOL = "grpc" + + +def all_cluster_configurations(): + with_work_dir = combinations.combine( + work_dir=TMP_WORK_DIR, fault_tolerant_mode=[True, False]) + without_work_dir = combinations.combine( + work_dir=NO_WORK_DIR, fault_tolerant_mode=False) + return with_work_dir + without_work_dir + + +def _make_worker( + dispatcher_address, + protocol, + data_transfer_protocol, + shutdown_quiet_period_ms=0, + port=0, + worker_tags=None, + cross_trainer_cache_size_bytes=None, + snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES, +): + """Creates a worker server.""" + defaults = server_lib.WorkerConfig(dispatcher_address=dispatcher_address) + config_proto = service_config_pb2.WorkerConfig( + dispatcher_address=dispatcher_address, + worker_address=defaults.worker_address, + port=port, + protocol=protocol, + worker_tags=worker_tags, + heartbeat_interval_ms=TEST_HEARTBEAT_INTERVAL_MS, + dispatcher_timeout_ms=TEST_DISPATCHER_TIMEOUT_MS, + data_transfer_protocol=data_transfer_protocol, + data_transfer_address=defaults.worker_address, + shutdown_quiet_period_ms=shutdown_quiet_period_ms, + cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes, + snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes, + ) + return server_lib.WorkerServer(config_proto, start=False) + + +# pylint: disable=protected-access +class TestWorker: + """A tf.data service worker.""" + + def __init__( + self, + dispatcher_address, + shutdown_quiet_period_ms, + protocol=PROTOCOL, + data_transfer_protocol=None, + port=0, + worker_tags=None, + cross_trainer_cache_size_bytes=None, + snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES, + ): + self._dispatcher_address = dispatcher_address + self._shutdown_quiet_period_ms = shutdown_quiet_period_ms + self._server = _make_worker( + dispatcher_address, + protocol, + data_transfer_protocol, + shutdown_quiet_period_ms, + port=port, + worker_tags=worker_tags, + cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes, + snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes, + ) + self._running = False + self._protocol = protocol + self._data_transfer_protocol = data_transfer_protocol + + def stop(self): + self._server._stop() + self._running = False + + def start(self): + self._server.start() + self._port = int(self._server._address.split(":")[1]) + self._running = True + + def restart(self, use_same_port=True): + """Restarts the worker, stopping it first if it is already running.""" + if self._running: + self.stop() + port = 0 + if use_same_port: + port = self._port + self._server = _make_worker(self._dispatcher_address, + self._protocol, + self._data_transfer_protocol, + self._shutdown_quiet_period_ms, port) + self._server.start() + self._port = int(self._server._address.split(":")[1]) + self._running = True + + def join(self): + self._server.join() + + def num_tasks(self): + return self._server._num_tasks() + + def snapshot_task_progresses(self): + return self._server._snapshot_task_progresses() + + def worker_address(self): + return self._server._address + + +class TestCluster: + """Test tf.data service cluster.""" + + def __init__( + self, + num_workers, + dispatcher_port=0, + work_dir=TMP_WORK_DIR, + fault_tolerant_mode=True, + job_gc_check_interval_ms=TEST_JOB_GC_CHECK_INTERNAL_MS, + job_gc_timeout_ms=None, + 
worker_timeout_ms=TEST_WORKER_TIMEOUT_MS, + worker_shutdown_quiet_period_ms=0, + snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES, + worker_max_concurrent_snapshots=0, + start=True, + protocol=PROTOCOL, + data_transfer_protocol=None, + ): + """Creates a tf.data service test cluster. + + Args: + num_workers: The number of workers to initially add to the cluster. + dispatcher_port: The port to use for the dispatcher. + work_dir: The work directory to use for the dispatcher. If set to + `TMP_WORK_DIR`, the cluster will create a new temporary directory to use + as the work directory. If set to `NO_WORK_DIR`, no work directory will + be used. + fault_tolerant_mode: Whether the dispatcher should write its state to a + journal so that it can recover from restarts. + job_gc_check_interval_ms: How often the dispatcher should scan through to + delete old and unused jobs, in milliseconds. + job_gc_timeout_ms: How long a job needs to be unused before it becomes a + candidate for garbage collection, in milliseconds. + worker_timeout_ms: How long to wait for a worker to heartbeat before + considering it missing, in milliseconds. + worker_shutdown_quiet_period_ms: When shutting down a worker, how long to + wait for the gRPC server to process the final requests. + snapshot_max_chunk_size_bytes: The maximum size of a distributed snapshot + chunk file. + worker_max_concurrent_snapshots: The maximum number of snapshots a worker + can concurrently process. + start: Whether to immediately start the servers in the cluster. If + `False`, the servers can be started later by calling + `start_dispatcher()` and `start_workers()`. + protocol: The protocol to use for communicating with the tf.data service, + e.g. "grpc". + data_transfer_protocol: (Optional.) The protocol to use for transferring + data with the tf.data service. 
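+
+    A minimal usage sketch (assuming this runs inside a `TestBase` test,
+    which provides `make_distributed_range_dataset` and
+    `assertDatasetProduces`):
+
+    ```
+    cluster = TestCluster(num_workers=1)
+    dataset = self.make_distributed_range_dataset(10, cluster)
+    self.assertDatasetProduces(dataset, list(range(10)))
+    ```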
+ """ + if work_dir == TMP_WORK_DIR: + work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir()) + self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms + self._snapshot_max_chunk_size_bytes = snapshot_max_chunk_size_bytes + self._protocol = protocol + self._data_transfer_protocol = data_transfer_protocol + self._job_gc_check_interval_ms = job_gc_check_interval_ms + self._job_gc_timeout_ms = job_gc_timeout_ms + self._worker_timeout_ms = worker_timeout_ms + self._worker_max_concurrent_snapshots = worker_max_concurrent_snapshots + self.dispatcher = server_lib.DispatchServer( + server_lib.DispatcherConfig( + port=dispatcher_port, + work_dir=work_dir, + protocol=protocol, + fault_tolerant_mode=fault_tolerant_mode, + job_gc_check_interval_ms=job_gc_check_interval_ms, + job_gc_timeout_ms=job_gc_timeout_ms, + worker_timeout_ms=worker_timeout_ms, + worker_max_concurrent_snapshots=worker_max_concurrent_snapshots, + ), + start=start, + ) + + self.workers = [] + for _ in range(num_workers): + self.add_worker(start=start) + + def dispatcher_address(self): + return self.dispatcher.target.split("://")[1] + + def add_worker(self, start=True): + worker = TestWorker( + self.dispatcher_address(), + self._worker_shutdown_quiet_period_ms, + self._protocol, + self._data_transfer_protocol, + snapshot_max_chunk_size_bytes=self._snapshot_max_chunk_size_bytes, + ) + if start: + worker.start() + self.workers.append(worker) + + def start_dispatcher(self): + self.dispatcher.start() + + def start_workers(self): + for worker in self.workers: + worker.start() + + def stop_dispatcher(self): + # pylint: disable=protected-access + self.dispatcher._stop() + + def restart_worker(self, index): + self.workers[index].restart() + + def stop_worker(self, index): + self.workers[index].stop() + + def stop_workers(self): + for worker in self.workers: + worker.stop() + + # pylint: disable=protected-access + def restart_dispatcher(self): + """Stops `dispatcher` and creates a new dispatcher with the same port. + + Restarting is supported only when the dispatcher is configured with + `fault_tolerant_mode=True`. + """ + if not self.dispatcher._config.fault_tolerant_mode: + raise ValueError( + "Trying to restart the dispatcher without fault-tolerance.") + port = int(self.dispatcher_address().split(":")[1]) + self.dispatcher._stop() + self.dispatcher = server_lib.DispatchServer( + server_lib.DispatcherConfig( + port=port, + work_dir=self.dispatcher._config.work_dir, + protocol=self._protocol, + fault_tolerant_mode=self.dispatcher._config.fault_tolerant_mode, + job_gc_check_interval_ms=self._job_gc_check_interval_ms, + job_gc_timeout_ms=self._job_gc_timeout_ms, + worker_timeout_ms=self._worker_timeout_ms, + worker_max_concurrent_snapshots= + self._worker_max_concurrent_snapshots, + ) + ) + + def num_registered_workers(self): + return self.dispatcher._num_workers() + + def num_tasks_on_workers(self): + return sum(worker.num_tasks() for worker in self.workers) + + def snapshot_streams(self, path): + return self.dispatcher._snapshot_streams(path) + + def __del__(self): + # Destroy workers before the dispatcher for clean shutdown. 
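+    # Dropping the worker references first lets their servers wind down
+    # while the dispatcher is still alive; the dispatcher is deleted last.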
+ self.workers.clear() + del self.dispatcher + + +class TestBase(test_base.DatasetTestBase): + """Base class for tf.data service tests.""" + + def setUp(self): + self.default_data_transfer_protocol = None + self.default_compression = "AUTO" + + def set_default_data_transfer_protocol(self, protocol): + self.default_data_transfer_protocol = protocol + + def set_default_compression(self, compression): + self.default_compression = compression + + def make_test_cluster(self, *args, **kwargs): + if "data_transfer_protocol" not in kwargs: + kwargs["data_transfer_protocol"] = self.default_data_transfer_protocol + return TestCluster(*args, **kwargs) + + def make_distributed_dataset(self, + dataset, + cluster, + processing_mode="parallel_epochs", + **kwargs): + kwargs["task_refresh_interval_hint_ms"] = 20 + if "data_transfer_protocol" not in kwargs: + kwargs["data_transfer_protocol"] = self.default_data_transfer_protocol + if "compression" not in kwargs: + kwargs["compression"] = self.default_compression + + # pylint: disable=protected-access + return dataset.apply( + data_service_ops._distribute( + processing_mode, + cluster.dispatcher_address(), + **kwargs)) + + def make_distributed_range_dataset(self, + num_elements, + cluster, + **kwargs): + dataset = dataset_ops.Dataset.range(num_elements) + return self.make_distributed_dataset(dataset, cluster, **kwargs) + + def make_coordinated_read_dataset( + self, + cluster, + num_consumers, + sharding_policy=data_service_ops.ShardingPolicy.OFF): + """Creates a dataset that performs coordinated reads. + + The dataset simulates `num_consumers` consumers by using parallel + interleave to read with `num_consumers` threads, one for each consumer. The + nth element of the dataset is produced by consumer `n % num_consumers`. + + The dataset executed on each worker will produce groups of `num_consumers` + sequentially increasing numbers. For example, if `num_consumers=3` a worker + dataset could produce [0, 1, 2, 9, 10, 11, 21, 22, 23]. This enables + `checkCoordinatedReadGroups` below to assess whether the values received in + each step came from the same group. + + Args: + cluster: A tf.data service `TestCluster`. + num_consumers: The number of consumers to simulate. + sharding_policy: The sharding policy to use. Currently only OFF and + DYNAMIC are supported. + + Returns: + A dataset that simulates reading with `num_consumers` consumers. + """ + if sharding_policy not in [ + data_service_ops.ShardingPolicy.OFF, + data_service_ops.ShardingPolicy.DYNAMIC + ]: + raise ValueError(f"Unsupported sharding policy: {sharding_policy}") + # Start from 0 so that we can detect when a new worker is added with + # ShardingPolicy.OFF. + ds = dataset_ops.Dataset.from_tensors(math_ops.cast(0, dtypes.int64)) + ds = ds.concatenate(dataset_ops.Dataset.random()) + # Ensure that all elements in the same group are consecutive. + def make_group(x): + # Avoid overflowing an int64 in (x+1)*num_consumers below. + x = x % (2**32) + return dataset_ops.Dataset.range(x*num_consumers, (x+1)*num_consumers) + ds = ds.flat_map(make_group) + consumers = [] + for consumer_index in range(num_consumers): + consumers.append( + self.make_distributed_dataset( + ds, + cluster, + job_name="test", + processing_mode=sharding_policy, + consumer_index=consumer_index, + num_consumers=num_consumers)) + # Use parallel interleave to read from consumers in parallel. 
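+    # With `cycle_length=num_consumers`, each cycle of the interleave takes
+    # one element from every consumer, so consecutive output elements
+    # correspond to one coordinated read across all consumers.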
+    ds = dataset_ops.Dataset.from_tensor_slices(consumers)
+    ds = ds.interleave(
+        lambda x: x,
+        cycle_length=num_consumers,
+        num_parallel_calls=num_consumers)
+    return ds
+
+  def checkCoordinatedReadGroups(self, results, num_consumers):
+    """Validates results from a `make_coordinated_read_dataset` dataset.
+
+    Each group of `num_consumers` results should be consecutive, indicating
+    that they were produced by the same worker.
+
+    Args:
+      results: The elements produced by the dataset.
+      num_consumers: The number of consumers.
+    """
+    groups = [
+        results[start:start + num_consumers]
+        for start in range(0, len(results), num_consumers)
+    ]
+    incorrect_groups = []
+    for group in groups:
+      # Check that each group of `num_consumers` results is consecutive.
+      for offset in range(1, len(group)):
+        if group[0] + offset != group[offset]:
+          incorrect_groups.append(group)
+          break
+    self.assertEmpty(
+        incorrect_groups,
+        "Incorrect groups: {}.\nAll groups: {}".format(incorrect_groups,
+                                                       groups))
+
+  def read(self, get_next, results, count):
+    for _ in range(count):
+      results.append(self.evaluate(get_next()))
+
+
+class TempDir:
+  """Temporary directory for unit testing."""
+
+  def __init__(self):
+    temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
+    self._path = os.path.join(
+        tempfile.mkdtemp(dir=temp_dir), "tf_data_snapshot")
+
+  @property
+  def full_path(self) -> str:
+    return self._path
+
+  def __fspath__(self) -> str:
+    return self._path
+
+  def __del__(self):
+    try:
+      shutil.rmtree(self.full_path)
+    except FileNotFoundError:
+      pass
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/batching.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/batching.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b07a6b4471e0092f477e52920a3365a909a228e
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/batching.py
@@ -0,0 +1,379 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Batching dataset transformations."""
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.data.ops import structured_function
+from tensorflow.python.data.util import convert
+from tensorflow.python.data.util import nest
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export("data.experimental.dense_to_ragged_batch")
+@deprecation.deprecated(None, "Use `tf.data.Dataset.ragged_batch` instead.")
+def dense_to_ragged_batch(batch_size,
+                          drop_remainder=False,
+                          row_splits_dtype=dtypes.int64):
+  """A transformation that batches ragged elements into `tf.RaggedTensor`s.
+
+  This transformation combines multiple consecutive elements of the input
+  dataset into a single element.
+
+  Like `tf.data.Dataset.batch`, the components of the resulting element will
+  have an additional outer dimension, which will be `batch_size` (or
+  `N % batch_size` for the last element if `batch_size` does not divide the
+  number of input elements `N` evenly and `drop_remainder` is `False`). If
+  your program depends on the batches having the same outer dimension, you
+  should set the `drop_remainder` argument to `True` to prevent the smaller
+  batch from being produced.
+
+  Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
+  different shapes:
+
+  * If an input element is a `tf.Tensor` whose static `tf.TensorShape` is
+    fully defined, then it is batched as normal.
+  * If an input element is a `tf.Tensor` whose static `tf.TensorShape`
+    contains one or more axes with unknown size (i.e., `shape[i]=None`), then
+    the output will contain a `tf.RaggedTensor` that is ragged up to any of
+    such dimensions.
+  * If an input element is a `tf.RaggedTensor` or any other type, then it is
+    batched as normal.
+
+  Example:
+
+  >>> dataset = tf.data.Dataset.from_tensor_slices(np.arange(6))
+  >>> dataset = dataset.map(lambda x: tf.range(x))
+  >>> dataset.element_spec.shape
+  TensorShape([None])
+  >>> dataset = dataset.apply(
+  ...     tf.data.experimental.dense_to_ragged_batch(batch_size=2))
+  >>> for batch in dataset:
+  ...   print(batch)
+  <tf.RaggedTensor [[], [0]]>
+  <tf.RaggedTensor [[0, 1], [0, 1, 2]]>
+  <tf.RaggedTensor [[0, 1, 2, 3], [0, 1, 2, 3, 4]]>
+
+  Args:
+    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
+      consecutive elements of this dataset to combine in a single batch.
+    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
+      whether the last batch should be dropped in the case it has fewer than
+      `batch_size` elements; the default behavior is not to drop the smaller
+      batch.
+    row_splits_dtype: The dtype that should be used for the `row_splits` of
+      any new ragged tensors. Existing `tf.RaggedTensor` elements do not have
+      their row_splits dtype changed.
+
+  Returns:
+    Dataset: A `Dataset`.
+  """
+
+  def _apply_fn(dataset):
+    return dataset.ragged_batch(batch_size, drop_remainder, row_splits_dtype)
+
+  return _apply_fn
+
+
+@tf_export("data.experimental.dense_to_sparse_batch")
+@deprecation.deprecated(None, "Use `tf.data.Dataset.sparse_batch` instead.")
+def dense_to_sparse_batch(batch_size, row_shape):
+  """A transformation that batches ragged elements into `tf.sparse.SparseTensor`s.
+
+  Like `Dataset.padded_batch()`, this transformation combines multiple
+  consecutive elements of the dataset, which might have different
+  shapes, into a single element. The resulting element has three
+  components (`indices`, `values`, and `dense_shape`), which
+  comprise a `tf.sparse.SparseTensor` that represents the same data. The
+  `row_shape` represents the dense shape of each row in the
+  resulting `tf.sparse.SparseTensor`, to which the effective batch size is
+  prepended. For example:
+
+  ```python
+  # NOTE: The following examples use `{ ... }` to represent the
+  # contents of a dataset.
+  a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
+
+  a.apply(tf.data.experimental.dense_to_sparse_batch(
+      batch_size=2, row_shape=[6])) ==
+  {
+      ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]],  # indices
+       ['a', 'b', 'c', 'a', 'b'],                 # values
+       [2, 6]),                                   # dense_shape
+      ([[0, 0], [0, 1], [0, 2], [0, 3]],
+       ['a', 'b', 'c', 'd'],
+       [1, 6])
+  }
+  ```
+
+  Args:
+    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
+      consecutive elements of this dataset to combine in a single batch.
+    row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
+      representing the equivalent dense shape of a row in the resulting
+      `tf.sparse.SparseTensor`. Each element of this dataset must have the
+      same rank as `row_shape`, and must have size less than or equal to
+      `row_shape` in each dimension.
+
+  Returns:
+    A `Dataset` transformation function, which can be passed to
+    `tf.data.Dataset.apply`.
+  """
+
+  def _apply_fn(dataset):
+    return dataset.sparse_batch(batch_size, row_shape)
+
+  return _apply_fn
+
+
+@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()`.")
+@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
+def map_and_batch_with_legacy_function(map_func,
+                                       batch_size,
+                                       num_parallel_batches=None,
+                                       drop_remainder=False,
+                                       num_parallel_calls=None):
+  """Fused implementation of `map` and `batch`.
+
+  NOTE: This is an escape hatch for existing uses of `map_and_batch` that do
+  not work with V2 functions. New uses are strongly discouraged and existing
+  uses should migrate to `map_and_batch`, as this method will be removed in
+  V2.
+
+  Args:
+    map_func: A function mapping a nested structure of tensors to another
+      nested structure of tensors.
+    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
+      consecutive elements of this dataset to combine in a single batch.
+    num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
+      representing the number of batches to create in parallel. On one hand,
+      higher values can help mitigate the effect of stragglers. On the other
+      hand, higher values can increase contention if CPU is scarce.
+    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
+      whether the last batch should be dropped in case its size is smaller
+      than desired; the default behavior is not to drop the smaller batch.
+    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
+      representing the number of elements to process in parallel.
+      If not specified, `batch_size * num_parallel_batches` elements will be
+      processed in parallel. If the value `tf.data.AUTOTUNE` is used, then
+      the number of parallel calls is set dynamically based on available CPU.
+
+  Returns:
+    A `Dataset` transformation function, which can be passed to
+    `tf.data.Dataset.apply`.
+
+  Raises:
+    ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
+      specified.
+  """
+
+  if num_parallel_batches is None and num_parallel_calls is None:
+    num_parallel_calls = batch_size
+  elif num_parallel_batches is not None and num_parallel_calls is None:
+    num_parallel_calls = batch_size * num_parallel_batches
+  elif num_parallel_batches is not None and num_parallel_calls is not None:
+    raise ValueError(
+        "`map_and_batch_with_legacy_function` allows only one of "
+        "`num_parallel_batches` and "
+        "`num_parallel_calls` to be set, but "
+        f"`num_parallel_batches` was set to {num_parallel_batches} "
+        f"and `num_parallel_calls` was set to {num_parallel_calls}.")
+
+  def _apply_fn(dataset):
+    return _MapAndBatchDataset(dataset, map_func, batch_size,
+                               num_parallel_calls, drop_remainder,
+                               use_legacy_function=True)
+
+  return _apply_fn
+
+
+@deprecation.deprecated(
+    None,
+    "Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
+    "`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
+    "optimizations will take care of using the fused implementation.")
+@tf_export("data.experimental.map_and_batch")
+def map_and_batch(map_func,
+                  batch_size,
+                  num_parallel_batches=None,
+                  drop_remainder=False,
+                  num_parallel_calls=None):
+  """Fused implementation of `map` and `batch`.
+
+  Maps `map_func` across `batch_size` consecutive elements of this dataset
+  and then combines them into a batch. Functionally, it is equivalent to
+  `map` followed by `batch`. This API is temporary and deprecated since input
+  pipeline optimization now fuses consecutive `map` and `batch` operations
+  automatically.
+
+  Args:
+    map_func: A function mapping a nested structure of tensors to another
+      nested structure of tensors.
+    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
+      consecutive elements of this dataset to combine in a single batch.
+    num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
+      representing the number of batches to create in parallel. On one hand,
+      higher values can help mitigate the effect of stragglers. On the other
+      hand, higher values can increase contention if CPU is scarce.
+    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
+      whether the last batch should be dropped in case its size is smaller
+      than desired; the default behavior is not to drop the smaller batch.
+    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
+      representing the number of elements to process in parallel. If not
+      specified, `batch_size * num_parallel_batches` elements will be
+      processed in parallel. If the value `tf.data.AUTOTUNE` is used, then
+      the number of parallel calls is set dynamically based on available CPU.
+
+  Returns:
+    A `Dataset` transformation function, which can be passed to
+    `tf.data.Dataset.apply`.
+
+  Raises:
+    ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
+      specified.
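+
+  For example (an illustrative sketch; the fused transformation behaves like
+  `map` followed by `batch`):
+
+  ```python
+  dataset = tf.data.Dataset.range(8)
+  dataset = dataset.apply(
+      tf.data.experimental.map_and_batch(lambda x: x * 2, batch_size=4))
+  # Produces two batches: [0, 2, 4, 6] and [8, 10, 12, 14].
+  ```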
+ """ + + if num_parallel_batches is None and num_parallel_calls is None: + num_parallel_calls = batch_size + elif num_parallel_batches is not None and num_parallel_calls is None: + num_parallel_calls = batch_size * num_parallel_batches + elif num_parallel_batches is not None and num_parallel_calls is not None: + raise ValueError( + "`map_and_batch` allows only one of `num_parallel_batches` and " + "`num_parallel_calls` to be set, but " + f"`num_parallel_batches` was set to {num_parallel_batches} " + f"and `num_parallel_calls` as set to {num_parallel_calls}.") + + def _apply_fn(dataset): + return _MapAndBatchDataset(dataset, map_func, batch_size, + num_parallel_calls, drop_remainder) + + return _apply_fn + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.unbatch()`.") +@tf_export("data.experimental.unbatch") +def unbatch(): + """Splits elements of a dataset into multiple elements on the batch dimension. + + For example, if elements of the dataset are shaped `[B, a0, a1, ...]`, + where `B` may vary for each input element, then for each element in the + dataset, the unbatched dataset will contain `B` consecutive elements + of shape `[a0, a1, ...]`. + + ```python + # NOTE: The following example uses `{ ... }` to represent the contents + # of a dataset. + a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] } + + a.unbatch() == { + 'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'} + ``` + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return dataset.unbatch() + + return _apply_fn + + +class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset): + """A `Dataset` that batches ragged dense elements into `tf.sparse.SparseTensor`s.""" + + def __init__(self, input_dataset, batch_size, row_shape): + """See `Dataset.dense_to_sparse_batch()` for more details.""" + if not isinstance( + dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType): + raise TypeError("`dense_to_sparse_batch` requires an input dataset whose " + "elements have a single component, but the given dataset " + "has the following component types: " + f"{dataset_ops.get_legacy_output_types(input_dataset)}.") + self._input_dataset = input_dataset + self._batch_size = batch_size + self._row_shape = row_shape + self._element_spec = sparse_tensor.SparseTensorSpec( + tensor_shape.TensorShape([None]).concatenate(self._row_shape), + dataset_ops.get_legacy_output_types(input_dataset)) + + variant_tensor = ged_ops.dense_to_sparse_batch_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._batch_size, + row_shape=convert.partial_shape_to_tensor(self._row_shape), + **self._flat_structure) + super(_DenseToSparseBatchDataset, self).__init__(input_dataset, + variant_tensor) + + @property + def element_spec(self): + return self._element_spec + + +class _MapAndBatchDataset(dataset_ops.UnaryDataset): + """A `Dataset` that maps a function over a batch of elements.""" + + def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls, + drop_remainder, use_legacy_function=False): + self._input_dataset = input_dataset + + self._map_func = structured_function.StructuredFunctionWrapper( + map_func, + "tf.data.experimental.map_and_batch()", + dataset=input_dataset, + use_legacy_function=use_legacy_function) + self._batch_size_t = ops.convert_to_tensor( + batch_size, dtype=dtypes.int64, name="batch_size") + self._num_parallel_calls_t = ops.convert_to_tensor( + num_parallel_calls, dtype=dtypes.int64, 
name="num_parallel_calls") + self._drop_remainder_t = ops.convert_to_tensor( + drop_remainder, dtype=dtypes.bool, name="drop_remainder") + + constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t) + # pylint: disable=protected-access + if constant_drop_remainder: + # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically) + # or `False` (explicitly retaining the remainder). + # pylint: disable=g-long-lambda + self._element_spec = nest.map_structure( + lambda component_spec: component_spec._batch( + tensor_util.constant_value(self._batch_size_t)), + self._map_func.output_structure) + else: + self._element_spec = nest.map_structure( + lambda component_spec: component_spec._batch(None), + self._map_func.output_structure) + # pylint: enable=protected-access + variant_tensor = ged_ops.map_and_batch_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._map_func.function.captured_inputs, + f=self._map_func.function, + batch_size=self._batch_size_t, + num_parallel_calls=self._num_parallel_calls_t, + drop_remainder=self._drop_remainder_t, + preserve_cardinality=True, + **self._flat_structure) + super(_MapAndBatchDataset, self).__init__(input_dataset, variant_tensor) + + def _functions(self): + return [self._map_func] + + @property + def element_spec(self): + return self._element_spec diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/cardinality.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/cardinality.py new file mode 100644 index 0000000000000000000000000000000000000000..6525e565f368627d8673ee859ac7ee04adc9563c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/cardinality.py @@ -0,0 +1,113 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Cardinality analysis of `Dataset` objects.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import gen_dataset_ops +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.util.tf_export import tf_export + + +INFINITE = -1 +UNKNOWN = -2 +tf_export("data.experimental.INFINITE_CARDINALITY").export_constant( + __name__, "INFINITE") +tf_export("data.experimental.UNKNOWN_CARDINALITY").export_constant( + __name__, "UNKNOWN") + + +# TODO(b/157691652): Deprecate this method after migrating users to the new API. +@tf_export("data.experimental.cardinality") +def cardinality(dataset): + """Returns the cardinality of `dataset`, if known. + + The operation returns the cardinality of `dataset`. 
The operation may return + `tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite + number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the + analysis fails to determine the number of elements in `dataset` (e.g. when the + dataset source is a file). + + >>> dataset = tf.data.Dataset.range(42) + >>> print(tf.data.experimental.cardinality(dataset).numpy()) + 42 + >>> dataset = dataset.repeat() + >>> cardinality = tf.data.experimental.cardinality(dataset) + >>> print((cardinality == tf.data.experimental.INFINITE_CARDINALITY).numpy()) + True + >>> dataset = dataset.filter(lambda x: True) + >>> cardinality = tf.data.experimental.cardinality(dataset) + >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) + True + + Args: + dataset: A `tf.data.Dataset` for which to determine cardinality. + + Returns: + A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If + the cardinality is infinite or unknown, the operation returns the named + constant `INFINITE_CARDINALITY` and `UNKNOWN_CARDINALITY` respectively. + """ + + return gen_dataset_ops.dataset_cardinality(dataset._variant_tensor) # pylint: disable=protected-access + + +@tf_export("data.experimental.assert_cardinality") +def assert_cardinality(expected_cardinality): + """Asserts the cardinality of the input dataset. + + NOTE: The following assumes that "examples.tfrecord" contains 42 records. + + >>> dataset = tf.data.TFRecordDataset("examples.tfrecord") + >>> cardinality = tf.data.experimental.cardinality(dataset) + >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) + True + >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42)) + >>> print(tf.data.experimental.cardinality(dataset).numpy()) + 42 + + Args: + expected_cardinality: The expected cardinality of the input dataset. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + + Raises: + FailedPreconditionError: The assertion is checked at runtime (when iterating + the dataset) and an error is raised if the actual and expected cardinality + differ. + """ + def _apply_fn(dataset): + return _AssertCardinalityDataset(dataset, expected_cardinality) + + return _apply_fn + + +class _AssertCardinalityDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that assert the cardinality of its input.""" + + def __init__(self, input_dataset, expected_cardinality): + self._input_dataset = input_dataset + self._expected_cardinality = ops.convert_to_tensor( + expected_cardinality, dtype=dtypes.int64, name="expected_cardinality") + + # pylint: enable=protected-access + variant_tensor = ged_ops.assert_cardinality_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._expected_cardinality, + **self._flat_structure) + super(_AssertCardinalityDataset, self).__init__(input_dataset, + variant_tensor) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/compression_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/compression_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..67213a0729638892bb652eb55ca9a4bb09d39bbc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/compression_ops.py @@ -0,0 +1,51 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Ops for compressing and uncompressing dataset elements.""" +from tensorflow.python.data.util import structure +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops + + +def compress(element): + """Compress a dataset element. + + Args: + element: A nested structure of types supported by Tensorflow. + + Returns: + A variant tensor representing the compressed element. This variant can be + passed to `uncompress` to get back the original element. + """ + element_spec = structure.type_spec_from_value(element) + tensor_list = structure.to_tensor_list(element_spec, element) + return ged_ops.compress_element(tensor_list) + + +def uncompress(element, output_spec): + """Uncompress a compressed dataset element. + + Args: + element: A scalar variant tensor to uncompress. The element should have been + created by calling `compress`. + output_spec: A nested structure of `tf.TypeSpec` representing the type(s) of + the uncompressed element. + + Returns: + The uncompressed element. + """ + flat_types = structure.get_flat_tensor_types(output_spec) + flat_shapes = structure.get_flat_tensor_shapes(output_spec) + tensor_list = ged_ops.uncompress_element( + element, output_types=flat_types, output_shapes=flat_shapes) + return structure.from_tensor_list(output_spec, tensor_list) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/counter.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/counter.py new file mode 100644 index 0000000000000000000000000000000000000000..e9dc2b49a0ea0d4e4ce6d576ee2731aace5dd0ff --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/counter.py @@ -0,0 +1,84 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The Counter Dataset.""" +from tensorflow.python import tf2 +from tensorflow.python.compat import v2_compat +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import dtypes +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.Counter", v1=[]) +@deprecation.deprecated(None, "Use `tf.data.Dataset.counter(...)` instead.") +def CounterV2(start=0, step=1, dtype=dtypes.int64): + """Creates a `Dataset` that counts from `start` in steps of size `step`. + + Unlike `tf.data.Dataset.range` which will stop at some ending number, + `Counter` will produce elements indefinitely. + + >>> dataset = tf.data.experimental.Counter().take(5) + >>> list(dataset.as_numpy_iterator()) + [0, 1, 2, 3, 4] + >>> dataset.element_spec + TensorSpec(shape=(), dtype=tf.int64, name=None) + >>> dataset = tf.data.experimental.Counter(dtype=tf.int32) + >>> dataset.element_spec + TensorSpec(shape=(), dtype=tf.int32, name=None) + >>> dataset = tf.data.experimental.Counter(start=2).take(5) + >>> list(dataset.as_numpy_iterator()) + [2, 3, 4, 5, 6] + >>> dataset = tf.data.experimental.Counter(start=2, step=5).take(5) + >>> list(dataset.as_numpy_iterator()) + [2, 7, 12, 17, 22] + >>> dataset = tf.data.experimental.Counter(start=10, step=-1).take(5) + >>> list(dataset.as_numpy_iterator()) + [10, 9, 8, 7, 6] + + Args: + start: (Optional.) The starting value for the counter. Defaults to 0. + step: (Optional.) The step size for the counter. Defaults to 1. + dtype: (Optional.) The data type for counter elements. Defaults to + `tf.int64`. + + Returns: + A `Dataset` of scalar `dtype` elements. + """ + return dataset_ops.Dataset.counter(start, step, dtype) + + +@tf_export(v1=["data.experimental.Counter"]) +@deprecation.deprecated(None, "Use `tf.data.Dataset.counter(...)` instead.") +def CounterV1(start=0, step=1, dtype=dtypes.int64): + return dataset_ops.DatasetV1Adapter(CounterV2(start, step, dtype)) + + +CounterV1.__doc__ = CounterV2.__doc__ + +if tf2.enabled(): + Counter = CounterV2 +else: + Counter = CounterV1 + + +def _tf2_callback(): # pylint: disable=invalid-name + global Counter + if tf2.enabled(): + Counter = CounterV2 + else: + Counter = CounterV1 + + +v2_compat.register_data_v2_callback(_tf2_callback) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distribute.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distribute.py new file mode 100644 index 0000000000000000000000000000000000000000..1eb3672d5dc2c11190777846f31bf97d726076ad --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distribute.py @@ -0,0 +1,399 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Distribution Strategy-related dataset transformations.""" + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops.options import ExternalStatePolicy +from tensorflow.python.data.util import nest +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import tensor_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.types import data as data_types +from tensorflow.python.util.tf_export import tf_export + +SHARD_HINT = -1 +tf_export("data.experimental.SHARD_HINT").export_constant( + __name__, "SHARD_HINT") + + +class _AutoShardDataset(dataset_ops.UnaryDataset): + """A `Dataset` that shards the `Dataset` automatically. + + This dataset takes in an existing dataset and tries to automatically figure + out how to shard the dataset in a multi-worker scenario using graph rewrites. + + If the AutoShardPolicy is set to FILE, it walks up the dataset graph until + it finds a reader dataset, then inserts a ShardDataset op before that node + so that each worker only sees some files. + + If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the + end of the input pipeline, before any terminal PrefetchDataset if there is + one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it + is written to legacy RebatchDataset for correctness reasons, since + RebatchDatasetV2 is incompatible with data sharding. + + If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding. + If it cannot find a reader dataset, it falls back to doing data-based + sharding. + + If the AutoShardPolicy is set to OFF, it does nothing. + + Attributes: + num_workers: Total number of workers to shard this dataset across. + index: The current worker index (out of the total number of workers) this + dataset is for. + num_replicas: The total number of replicas across all workers. This is used + only when sharding by data (either DATA or AUTO) in order to rewrite + RebatchDatasetV2 to RebatchDataset. + + Raises: + NotFoundError: If we cannot find a suitable reader dataset to begin + automatically sharding the dataset. + """ + + def __init__(self, input_dataset, num_workers, index, num_replicas=None): + self._input_dataset = input_dataset + + self._element_spec = input_dataset.element_spec + variant_tensor = ged_ops.auto_shard_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + num_workers=num_workers, + index=index, + auto_shard_policy=int( + input_dataset.options().experimental_distribute.auto_shard_policy), + num_replicas=num_replicas, + **self._flat_structure) + super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor) + + @property + def element_spec(self): + return self._element_spec + + +def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None): # pylint: disable=invalid-name + return dataset_ops.DatasetV1Adapter( + _AutoShardDataset(input_dataset, num_workers, index, num_replicas)) + + +class _LegacyRebatchDataset(dataset_ops.UnaryDataset): + """A `Dataset` that divides its input batches into `num_replicas` sub-batches. 
+ + For each batch in the input dataset, _LegacyRebatchDataset will produce + `num_replicas` smaller batches whose sizes add up to the original batch size. + + For example: + + ```python + ds = tf.data.Dataset.range(8) + ds = ds.batch(4) + ds = _LegacyRebatchDataset(ds, num_replicas=3) + for elem in ds: + print(elem) + >> [0, 1], [2, 3], [], [4, 5], [6, 7], [] + ``` + """ + + def __init__(self, input_dataset, num_replicas): + """Creates a _LegacyRebatchDataset. + + Args: + input_dataset: `Dataset` to rebatch. + num_replicas: A `tf.int64` scalar, representing the number of sub-batches + to split each batch from `input_dataset` into. + """ + + def recalculate_batch_size(type_spec): + """Recalculates the output_shape after dividing it by num_replicas.""" + output_shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access + if not isinstance(output_shape, tensor_shape.TensorShape): + return None + + # If the output shape is unknown, we set the batch dimension to unknown. + if output_shape.rank is None: + return None + + if len(output_shape) < 1: + raise ValueError( + "Invalid `input_dataset`. Expected a dataset whose elements " + "have rank >= 1 but found a dataset whose elements are scalars. " + "Fix the issue by adding the `batch` transformation to the " + "dataset.") + output_dims = [d.value for d in output_shape.dims] + + if output_dims[0] is not None and output_dims[0] % num_replicas == 0: + return output_dims[0] // num_replicas + + # Set the batch dimension to unknown. If the global batch size does not + # divide num_replicas evenly, the minibatches may have different sizes. + return None + + def rebatch(type_spec): + # pylint: disable=protected-access + batch_size = recalculate_batch_size(type_spec) + return type_spec._unbatch()._batch(batch_size) + # pylint: enable=protected-access + + self._element_spec = nest.map_structure( + rebatch, dataset_ops.get_structure(input_dataset)) + + # auto_shard rewrite assumes that there's normalize_to_dense before + # rebatch_dataset. + # LINT.IfChange + input_dataset = dataset_ops.normalize_to_dense(input_dataset) + variant_tensor = ged_ops.rebatch_dataset( + input_dataset._variant_tensor, # pylint: disable=protected-access + num_replicas=num_replicas, + **self._flat_structure) + # LINT.ThenChange(//tensorflow/core/grappler/optimizers/data/auto_shard.cc) + super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor) + + @property + def element_spec(self): + return self._element_spec + + +class _RemoteDataset(dataset_ops.DatasetSource): + """Creates a dataset on a given `device` given a graph def.""" + + def __init__(self, graph_def, device, element_spec): + self._elem_spec = element_spec + with ops.device(device): + variant_tensor = ged_ops.dataset_from_graph(graph_def) + super(_RemoteDataset, self).__init__(variant_tensor) + + @property + def element_spec(self): + return self._elem_spec + + +def replicate(dataset, devices): + """A transformation that replicates `dataset` onto a list of devices. + + Args: + dataset: A `tf.data.Dataset` object. + devices: A list of devices to replicate the dataset on. + + Returns: + A dictionary mapping device name to a dataset on that device. + """ + if not isinstance(dataset, data_types.DatasetV2): + raise TypeError( + f"Invalid `dataset`. 
Expected a `tf.data.Dataset` object but " + f"got {type(dataset)}.") + + # pylint: disable=protected-access + dataset_device = dataset._variant_tensor.device + + datasets = {} + if len(devices) == 1 and devices[0] == dataset_device: + datasets[devices[0]] = dataset + return datasets + + with ops.colocate_with(dataset._variant_tensor): + dataset = dataset._apply_debug_options() + graph_def = dataset._as_serialized_graph( + strip_device_assignment=True, + external_state_policy=ExternalStatePolicy.WARN) + for device in devices: + ds = _RemoteDataset(graph_def, device, dataset.element_spec) + datasets[device] = ds + return datasets + + +def batch_sizes_for_worker(global_batch_size, num_workers, + num_replicas_per_worker, worker_index): + """Determines how to rebatch a dataset for the given worker. + + Given the global batch size, number of workers, number of replicas per worker, + and worker index, returns the correct batch sizes for rebatching a dataset + on worker `worker_index` of `num_workers`, such that each global step (across + all workers and replicas) will consume global_batch_size elements. The + returned value should be passed as the `batch_sizes` input parameter to + `tf.data.experimental.rebatch()`. The returned batch sizes meet the following + constraints: + + Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker + (A) for any worker, len(batch_sizes) = W * R + (B) for any worker, sum(batch_sizes) == G + (C) for any global step (i.e. R iterations on each worker), the sum of batches + consumed by replicas across all workers is G. + (D) any two batch sizes of any two replicas differs by at most one. + + For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two + files which each contain 7 elements: + + ```python + # WORKER 0 + batch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size, + num_workers=2, + num_replicas_per_worker=2, + worker_index=0) + print(batch_sizes_0) + >> [2, 2, 2, 1] + + dataset_0 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"]) + dataset_0 = dataset_0.shard(num_shards, index=0) + dataset_0 = dataset_0.batch(7) + dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0)) + for elem in dataset_0: + print(elem) + >> [[A0, A1], [A2, A3], [A4, A5], [A6]] + + # WORKER 1 + batch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size, + num_workers=2, + num_replicas_per_worker=2, + worker_index=1) + print(batch_sizes_1) + >> [2, 1, 2, 2] + + dataset_1 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"]) + dataset_1 = dataset_1.shard(num_shards, index=1) + dataset_1 = dataset_1.batch(7) + dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1)) + for elem in dataset_1: + print(elem) + >> [[B0, B1], [B2], [B3, B4], [B5, B6]] + ``` + + The above example will produce the following elements: + + Step 1: + Worker 0 Replica 0: [A0, A1] + Worker 0 Replica 1: [A2, A3] + Worker 1 Replica 0: [B0, B1] + Worker 1 Replica 1: [B2] + Total batch size = 7 + + Step 2: + Worker 0 Replica 0: [A4, A5] + Worker 0 Replica 1: [A6] + Worker 1 Replica 0: [B3, B4] + Worker 1 Replica 1: [B5, B6] + Total batch size = 7 + + Args: + global_batch_size: A `tf.int64` scalar, representing the global batch size. + num_workers: An integer representing the number of workers the dataset will + be distributed across. + num_replicas_per_worker: An integer representing the number of replicas per + worker. All workers are assumed to have the same number of replicas. 
+ worker_index: An integer index of the worker to be rebatched. + + Returns: + A `tf.int64` vector, representing the batch sizes to rebatch the dataset + into. + """ + # Constraint (A) + num_subbatches = num_workers * num_replicas_per_worker + + offset = worker_index * num_replicas_per_worker + + const_value = tensor_util.constant_value(global_batch_size) + if const_value is not None: + # Use the constant global batch size for further calculations + global_batch_size = const_value + + # Let N = W * R. Constraint (B) and (D) jointly mean that the iterations + # should have batch size either floor(B/N) or ceil(B/N). Namely, of the N + # subbatches a batch is split into, B - N * floor(B/N) of them will have size + # ceil(B/N), and the rest will have size floor(B/N). + floor = global_batch_size // num_subbatches + num_ceil = global_batch_size - (num_subbatches * floor) + + # For worker 0, we assign the first num_ceil subbatches to have size + # ceil(B/N), and the remainder to have size floor(B/N). The other workers will + # each be offset by R * worker_index in order to meet constraint (C). + if const_value is not None: + # If the global batch size is a known constant value, we return a constant + # tensor directly instead of manipulating it with TF ops. This allows for + # better downstream shape inference. + worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil) + return ops.convert_to_tensor( + worker_0[offset:] + worker_0[:offset], + dtype=dtypes.int64, + name="batch_sizes") + + worker_0 = array_ops.ones(num_subbatches, dtype=dtypes.int64) + worker_0 = floor * worker_0 + array_ops.concat([ + array_ops.ones(num_ceil, dtype=dtypes.int64), + array_ops.zeros(num_subbatches - num_ceil, dtype=dtypes.int64) + ], + axis=0) + + return array_ops.concat([worker_0[offset:], worker_0[:offset]], axis=0) + + +def compute_batch_size(dataset): + """An operation that returns the batch size of the dataset. + + This op tries to infer the batch size statically by walking up the dataset + tree from the final dataset node and returning the batch size of the first + batching dataset (such as from .batch() and .padded_batch()) that it + encounters. This differs from using the `element_spec` of a dataset in that it + does not account for partial batches. + + This operation may fail if it encounters contradictory batch sizes (for + example, if the dataset is created by zipping together two datasets with + different batch sizes), if there are no explicit batching transformations, or + if there are operations downstream from the batching transformation that may + modify its batch size. In these cases, it returns a -1. + + Args: + dataset: A `tf.data.Dataset` object. + + Returns: + A `tf.int64` Tensor representing the batch size of the dataset sans partial + batches. If this cannot be inferred statically, the value of this tensor + will be -1. + """ + + def get_static_batch_dim(type_spec): + try: + output_shape = type_spec._to_legacy_output_shapes() # pylint: disable=protected-access + except NotImplementedError: + return None + if not isinstance(output_shape, tensor_shape.TensorShape): + return None + if output_shape.rank is None: + return None + return output_shape.dims[0].value + + batch_dims = [ + get_static_batch_dim(type_spec) + for type_spec in nest.flatten(dataset_ops.get_structure(dataset)) + ] + + if all(d is not None for d in batch_dims): + + if all(d == batch_dims[0] for d in batch_dims): + # If all batch dimensions are known and equal, return that directly. 
+ batch_dim = batch_dims[0] + else: + # If all batch dimensions are known but not all equal, return -1. + batch_dim = -1 + + return constant_op.constant( + batch_dim, dtype=dtypes.int64, name="static_batch_size") + + # If any batch dimensions are unknown, use compute_batch_size op. + return ged_ops.compute_batch_size(dataset._variant_tensor) # pylint: disable=protected-access + + +_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distributed_save_op.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distributed_save_op.py new file mode 100644 index 0000000000000000000000000000000000000000..3e986a72681309237024eb910ce2af40c04fa37d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distributed_save_op.py @@ -0,0 +1,61 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Distributed saving of a dataset to disk.""" + +from tensorflow.core.protobuf import snapshot_pb2 +from tensorflow.python.ops import gen_experimental_dataset_ops +# TODO(b/238903802): Use TypeSpec serialization methods directly. +from tensorflow.python.saved_model import nested_structure_coder + + +# TODO(b/250921378): Add example to docstring and export to TF API. +def distributed_save(dataset, path, dispatcher_address, compression="AUTO"): + """Initiates the process of distributedly saving a dataset to disk. + + Args: + dataset: The `tf.data.Dataset` to save. + path: A string indicating the filepath of the directory to which to save + `dataset`. + dispatcher_address: A string indicating the address of the dispatcher for + the tf.data service instance used to save `dataset`. + compression: (Optional.) A string indicating whether and how to compress the + `dataset` materialization. If `"AUTO"`, the tf.data runtime decides which + algorithm to use. If `"GZIP"` or `"SNAPPY"`, that specific algorithm is + used. If `None`, the `dataset` materialization is not compressed. + + Returns: + An operation which when executed performs the distributed save. + + Raises: + ValueError: If `dispatcher_address` is invalid. 
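+
+  Example usage (a minimal sketch; `"localhost:5000"` is a hypothetical
+  address of a running tf.data service dispatcher and `"/tmp/snapshot"` is an
+  arbitrary output directory -- neither is defined by this module):
+
+  ```python
+  dataset = tf.data.Dataset.range(10)
+  # Assumes a tf.data service dispatcher is reachable at the given address.
+  distributed_save(dataset, "/tmp/snapshot", "localhost:5000")
+  ```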
+ """ + if not isinstance(dispatcher_address, str): + raise ValueError("`dispatcher_address` must be a string, but is a " + f"{type(dispatcher_address)} ({dispatcher_address}") + if not dispatcher_address: + raise ValueError("`dispatcher_address` must not be empty") + + metadata = snapshot_pb2.DistributedSnapshotMetadata( + element_spec=nested_structure_coder.encode_structure( + dataset.element_spec).SerializeToString(), + compression=compression, + ) + + return gen_experimental_dataset_ops.distributed_save( + dataset._variant_tensor, # pylint: disable=protected-access + directory=path, + address=dispatcher_address, + metadata=metadata.SerializeToString(), + ) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/enumerate_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/enumerate_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7d73e748156c44d34c4923bf0a57d00b8213a3cf --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/enumerate_ops.py @@ -0,0 +1,54 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Enumerate dataset transformations.""" +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.") +@tf_export("data.experimental.enumerate_dataset") +def enumerate_dataset(start=0): + """A transformation that enumerates the elements of a dataset. + + It is similar to python's `enumerate`. + For example: + + ```python + # NOTE: The following examples use `{ ... }` to represent the + # contents of a dataset. + a = { 1, 2, 3 } + b = { (7, 8), (9, 10) } + + # The nested structure of the `datasets` argument determines the + # structure of elements in the resulting dataset. + a.apply(tf.data.experimental.enumerate_dataset(start=5)) + => { (5, 1), (6, 2), (7, 3) } + b.apply(tf.data.experimental.enumerate_dataset()) + => { (0, (7, 8)), (1, (9, 10)) } + ``` + + Args: + start: A `tf.int64` scalar `tf.Tensor`, representing the start value for + enumeration. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return dataset.enumerate(start) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/from_list.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/from_list.py new file mode 100644 index 0000000000000000000000000000000000000000..9725d54c1b65a83ee8884f14bcac7df3db434494 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/from_list.py @@ -0,0 +1,119 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python API for creating a dataset from a list.""" + +import itertools + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.util import nest +from tensorflow.python.data.util import structure +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.util.tf_export import tf_export + + +class _ListDataset(dataset_ops.DatasetSource): + """A `Dataset` of elements from a list.""" + + def __init__(self, elements, name=None): + if not elements: + raise ValueError("Invalid `elements`. `elements` should not be empty.") + if not isinstance(elements, list): + raise ValueError("Invalid `elements`. `elements` must be a list.") + + elements = [structure.normalize_element(element) for element in elements] + type_specs = [ + structure.type_spec_from_value(element) for element in elements + ] + + # Check that elements have same nested structure. + num_elements = len(elements) + for i in range(1, num_elements): + nest.assert_same_structure(type_specs[0], type_specs[i]) + + # Infer elements' supershape. + flattened_type_specs = [nest.flatten(type_spec) for type_spec in type_specs] + num_tensors_per_element = len(flattened_type_specs[0]) + flattened_structure = [None] * num_tensors_per_element + for i in range(num_tensors_per_element): + flattened_structure[i] = flattened_type_specs[0][i] + for j in range(1, num_elements): + flattened_structure[i] = flattened_structure[ + i].most_specific_common_supertype([flattened_type_specs[j][i]]) + + if not isinstance(type_specs[0], dataset_ops.DatasetSpec): + self._tensors = list( + itertools.chain.from_iterable( + [nest.flatten(element) for element in elements])) + else: + self._tensors = [x._variant_tensor for x in elements] + self._structure = nest.pack_sequence_as(type_specs[0], flattened_structure) + self._name = name + variant_tensor = gen_experimental_dataset_ops.list_dataset( + self._tensors, + output_types=self._flat_types, + output_shapes=self._flat_shapes, + metadata=self._metadata.SerializeToString()) + super(_ListDataset, self).__init__(variant_tensor) + + @property + def element_spec(self): + return self._structure + + +@tf_export("data.experimental.from_list") +def from_list(elements, name=None): + """Creates a `Dataset` comprising the given list of elements. + + The returned dataset will produce the items in the list one by one. The + functionality is identical to `Dataset.from_tensor_slices` when elements are + scalars, but different when elements have structure. Consider the following + example. 
+ + >>> dataset = tf.data.experimental.from_list([(1, 'a'), (2, 'b'), (3, 'c')]) + >>> list(dataset.as_numpy_iterator()) + [(1, b'a'), (2, b'b'), (3, b'c')] + + To get the same output with `from_tensor_slices`, the data needs to be + reorganized: + + >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3], ['a', 'b', 'c'])) + >>> list(dataset.as_numpy_iterator()) + [(1, b'a'), (2, b'b'), (3, b'c')] + + Unlike `from_tensor_slices`, `from_list` supports non-rectangular input: + + >>> dataset = tf.data.experimental.from_list([[1], [2, 3]]) + >>> list(dataset.as_numpy_iterator()) + [array([1], dtype=int32), array([2, 3], dtype=int32)] + + Achieving the same with `from_tensor_slices` requires the use of ragged + tensors. + + `from_list` can be more performant than `from_tensor_slices` in some cases, + since it avoids the need for data slicing each epoch. However, it can also be + less performant, because data is stored as many small tensors rather than a + few large tensors as in `from_tensor_slices`. The general guidance is to + prefer `from_list` from a performance perspective when the number of elements + is small (less than 1000). + + Args: + elements: A list of elements whose components have the same nested + structure. + name: (Optional.) A name for the tf.data operation. + + Returns: + Dataset: A `Dataset` of the `elements`. + """ + return _ListDataset(elements, name) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/interleave_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/interleave_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1d97d6a0e90eea4f1e7077ed39b52c9c112023 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/interleave_ops.py @@ -0,0 +1,261 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Non-deterministic dataset transformations.""" +from tensorflow.python import tf2 +from tensorflow.python.compat import v2_compat +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import readers +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated( + None, + "Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, " + "num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy " + "execution is desired, use `tf.data.Options.deterministic`.") +@tf_export("data.experimental.parallel_interleave") +def parallel_interleave(map_func, + cycle_length, + block_length=1, + sloppy=False, + buffer_output_elements=None, + prefetch_input_elements=None): + """A parallel version of the `Dataset.interleave()` transformation. + + `parallel_interleave()` maps `map_func` across its input to produce nested + datasets, and outputs their elements interleaved. 
Unlike + `tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested + datasets in parallel, which increases the throughput, especially in the + presence of stragglers. Furthermore, the `sloppy` argument can be used to + improve performance, by relaxing the requirement that the outputs are produced + in a deterministic order, and allowing the implementation to skip over nested + datasets whose elements are not readily available when requested. + + Example usage: + + ```python + # Preprocess 4 files concurrently. + filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords") + dataset = filenames.apply( + tf.data.experimental.parallel_interleave( + lambda filename: tf.data.TFRecordDataset(filename), + cycle_length=4)) + ``` + + WARNING: If `sloppy` is `True`, the order of produced elements is not + deterministic. + + Args: + map_func: A function mapping a nested structure of tensors to a `Dataset`. + cycle_length: The number of input `Dataset`s to interleave from in parallel. + block_length: The number of consecutive elements to pull from an input + `Dataset` before advancing to the next input `Dataset`. + sloppy: A boolean controlling whether determinism should be traded for + performance by allowing elements to be produced out of order. If `sloppy` + is `None`, the `tf.data.Options.deterministic` dataset option (`True` by + default) is used to decide whether to enforce a deterministic order. + buffer_output_elements: The number of elements each iterator being + interleaved should buffer (similar to the `.prefetch()` transformation for + each interleaved iterator). + prefetch_input_elements: The number of input elements to transform to + iterators before they are needed for interleaving. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return readers.ParallelInterleaveDataset(dataset, map_func, cycle_length, + block_length, sloppy, + buffer_output_elements, + prefetch_input_elements) + + return _apply_fn + + +@deprecation.deprecated(None, + "Use `tf.data.Dataset.sample_from_datasets(...)`.") +@tf_export("data.experimental.sample_from_datasets", v1=[]) +def sample_from_datasets_v2(datasets, + weights=None, + seed=None, + stop_on_empty_dataset=False): + """Samples elements at random from the datasets in `datasets`. + + Creates a dataset by interleaving elements of `datasets` with `weight[i]` + probability of picking an element from dataset `i`. Sampling is done without + replacement. For example, suppose we have 2 datasets: + + ```python + dataset1 = tf.data.Dataset.range(0, 3) + dataset2 = tf.data.Dataset.range(100, 103) + ``` + + Suppose also that we sample from these 2 datasets with the following weights: + + ```python + sample_dataset = tf.data.Dataset.sample_from_datasets( + [dataset1, dataset2], weights=[0.5, 0.5]) + ``` + + One possible outcome of elements in sample_dataset is: + + ``` + print(list(sample_dataset.as_numpy_iterator())) + # [100, 0, 1, 101, 2, 102] + ``` + + Args: + datasets: A non-empty list of `tf.data.Dataset` objects with compatible + structure. + weights: (Optional.) A list or Tensor of `len(datasets)` floating-point + values where `weights[i]` represents the probability to sample from + `datasets[i]`, or a `tf.data.Dataset` object where each element is such a + list. Defaults to a uniform distribution across `datasets`. + seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random + seed that will be used to create the distribution. 
See + `tf.random.set_seed` for behavior. + stop_on_empty_dataset: If `True`, sampling stops if it encounters an empty + dataset. If `False`, it skips empty datasets. It is recommended to set it + to `True`. Otherwise, the distribution of samples starts off as the user + intends, but may change as input datasets become empty. This can be + difficult to detect since the dataset starts off looking correct. Default + to `False` for backward compatibility. + + Returns: + A dataset that interleaves elements from `datasets` at random, according to + `weights` if provided, otherwise with uniform probability. + + Raises: + TypeError: If the `datasets` or `weights` arguments have the wrong type. + ValueError: + - If `datasets` is empty, or + - If `weights` is specified and does not match the length of `datasets`. + """ + return dataset_ops.Dataset.sample_from_datasets( + datasets=datasets, + weights=weights, + seed=seed, + stop_on_empty_dataset=stop_on_empty_dataset) + + +@deprecation.deprecated(None, + "Use `tf.data.Dataset.sample_from_datasets(...)`.") +@tf_export(v1=["data.experimental.sample_from_datasets"]) +def sample_from_datasets_v1(datasets, + weights=None, + seed=None, + stop_on_empty_dataset=False): + return dataset_ops.DatasetV1Adapter( + sample_from_datasets_v2(datasets, weights, seed, stop_on_empty_dataset)) + + +sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__ + + +@deprecation.deprecated( + None, "Use `tf.data.Dataset.choose_from_datasets(...)` instead. Note that, " + "unlike the experimental endpoint, the non-experimental endpoint " + "sets `stop_on_empty_dataset=True` by default. You should set this " + "argument explicitly in case you would like to match the behavior of the " + "experimental endpoint.") +@tf_export("data.experimental.choose_from_datasets", v1=[]) +def choose_from_datasets_v2(datasets, + choice_dataset, + stop_on_empty_dataset=False): + """Creates a dataset that deterministically chooses elements from `datasets`. + + For example, given the following datasets: + + ```python + datasets = [tf.data.Dataset.from_tensors("foo").repeat(), + tf.data.Dataset.from_tensors("bar").repeat(), + tf.data.Dataset.from_tensors("baz").repeat()] + + # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`. + choice_dataset = tf.data.Dataset.range(3).repeat(3) + + result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset) + ``` + + The elements of `result` will be: + + ``` + "foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz" + ``` + + Args: + datasets: A non-empty list of `tf.data.Dataset` objects with compatible + structure. + choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between `0` + and `len(datasets) - 1`. + stop_on_empty_dataset: If `True`, selection stops if it encounters an empty + dataset. If `False`, it skips empty datasets. It is recommended to set it + to `True`. Otherwise, the selected elements start off as the user intends, + but may change as input datasets become empty. This can be difficult to + detect since the dataset starts off looking correct. Default to `False` + for backward compatibility. + + Returns: + A dataset that interleaves elements from `datasets` according to the values + of `choice_dataset`. + + Raises: + TypeError: If `datasets` or `choice_dataset` has the wrong type. + ValueError: If `datasets` is empty. 
+ """ + return dataset_ops.Dataset.choose_from_datasets( + datasets=datasets, + choice_dataset=choice_dataset, + stop_on_empty_dataset=stop_on_empty_dataset) + + +@deprecation.deprecated( + None, "Use `tf.data.Dataset.choose_from_datasets(...)` instead. Note that, " + "unlike the experimental endpoint, the non-experimental endpoint " + "sets `stop_on_empty_dataset=True` by default. You should set this " + "argument explicitly in case you would like to match the behavior of the " + "experimental endpoint.") +@tf_export(v1=["data.experimental.choose_from_datasets"]) +def choose_from_datasets_v1(datasets, + choice_dataset, + stop_on_empty_dataset=False): + return dataset_ops.DatasetV1Adapter( + choose_from_datasets_v2(datasets, choice_dataset, stop_on_empty_dataset)) + + +choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__ + +if tf2.enabled(): + choose_from_datasets = choose_from_datasets_v2 + sample_from_datasets = sample_from_datasets_v2 +else: + choose_from_datasets = choose_from_datasets_v1 + sample_from_datasets = sample_from_datasets_v1 + + +def _tf2_callback(): + global choose_from_datasets, sample_from_datasets + if tf2.enabled(): + choose_from_datasets = choose_from_datasets_v2 + sample_from_datasets = sample_from_datasets_v2 + else: + choose_from_datasets = choose_from_datasets_v1 + sample_from_datasets = sample_from_datasets_v1 + + +v2_compat.register_data_v2_callback(_tf2_callback) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/io.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/io.py new file mode 100644 index 0000000000000000000000000000000000000000..35554a1464c953a23d54406dde78f85ad90ab7be --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/io.py @@ -0,0 +1,166 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python API for save and loading a dataset.""" + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + +COMPRESSION_GZIP = "GZIP" +COMPRESSION_SNAPPY = "NONE" +DATASET_SPEC_FILENAME = "dataset_spec.pb" + + +@tf_export("data.experimental.save", v1=[]) +@deprecation.deprecated(None, "Use `tf.data.Dataset.save(...)` instead.") +def save(dataset, + path, + compression=None, + shard_func=None, + checkpoint_args=None): + """Saves the content of the given dataset. + + Example usage: + + >>> import tempfile + >>> path = os.path.join(tempfile.gettempdir(), "saved_data") + >>> # Save a dataset + >>> dataset = tf.data.Dataset.range(2) + >>> tf.data.experimental.save(dataset, path) + >>> new_dataset = tf.data.experimental.load(path) + >>> for elem in new_dataset: + ... 
print(elem) + tf.Tensor(0, shape=(), dtype=int64) + tf.Tensor(1, shape=(), dtype=int64) + + The saved dataset is saved in multiple file "shards". By default, the dataset + output is divided to shards in a round-robin fashion but custom sharding can + be specified via the `shard_func` function. For example, you can save the + dataset to using a single shard as follows: + + ```python + dataset = make_dataset() + def custom_shard_func(element): + return np.int64(0) + dataset = tf.data.experimental.save( + path="/path/to/data", ..., shard_func=custom_shard_func) + ``` + + To enable checkpointing, pass in `checkpoint_args` to the `save` method + as follows: + + ```python + dataset = tf.data.Dataset.range(100) + save_dir = "..." + checkpoint_prefix = "..." + step_counter = tf.Variable(0, trainable=False) + checkpoint_args = { + "checkpoint_interval": 50, + "step_counter": step_counter, + "directory": checkpoint_prefix, + "max_to_keep": 20, + } + dataset.save(dataset, save_dir, checkpoint_args=checkpoint_args) + ``` + + NOTE: The directory layout and file format used for saving the dataset is + considered an implementation detail and may change. For this reason, datasets + saved through `tf.data.experimental.save` should only be consumed through + `tf.data.experimental.load`, which is guaranteed to be backwards compatible. + + Args: + dataset: The dataset to save. + path: Required. A directory to use for saving the dataset. + compression: Optional. The algorithm to use to compress data when writing + it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`. + shard_func: Optional. A function to control the mapping of dataset elements + to file shards. The function is expected to map elements of the input + dataset to int64 shard IDs. If present, the function will be traced and + executed as graph computation. + checkpoint_args: Optional args for checkpointing which will be passed into + the `tf.train.CheckpointManager`. If `checkpoint_args` are not specified, + then checkpointing will not be performed. The `save()` implementation + creates a `tf.train.Checkpoint` object internally, so users should not + set the `checkpoint` argument in `checkpoint_args`. + + Returns: + An operation which when executed performs the save. When writing + checkpoints, returns None. The return value is useful in unit tests. + + Raises: + ValueError if `checkpoint` is passed into `checkpoint_args`. + """ + return dataset.save(path, compression, shard_func, checkpoint_args) + + +@tf_export("data.experimental.load", v1=[]) +@deprecation.deprecated(None, "Use `tf.data.Dataset.load(...)` instead.") +def load(path, element_spec=None, compression=None, reader_func=None): + """Loads a previously saved dataset. + + Example usage: + + >>> import tempfile + >>> path = os.path.join(tempfile.gettempdir(), "saved_data") + >>> # Save a dataset + >>> dataset = tf.data.Dataset.range(2) + >>> tf.data.experimental.save(dataset, path) + >>> new_dataset = tf.data.experimental.load(path) + >>> for elem in new_dataset: + ... print(elem) + tf.Tensor(0, shape=(), dtype=int64) + tf.Tensor(1, shape=(), dtype=int64) + + + If the default option of sharding the saved dataset was used, the element + order of the saved dataset will be preserved when loading it. + + The `reader_func` argument can be used to specify a custom order in which + elements should be loaded from the individual shards. 
The `reader_func` is + expected to take a single argument -- a dataset of datasets, each containing + elements of one of the shards -- and return a dataset of elements. For + example, the order of shards can be shuffled when loading them as follows: + + ```python + def custom_reader_func(datasets): + datasets = datasets.shuffle(NUM_SHARDS) + return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE) + + dataset = tf.data.experimental.load( + path="/path/to/data", ..., reader_func=custom_reader_func) + ``` + + Args: + path: Required. A path pointing to a previously saved dataset. + element_spec: Optional. A nested structure of `tf.TypeSpec` objects matching + the structure of an element of the saved dataset and specifying the type + of individual element components. If not provided, the nested structure of + `tf.TypeSpec` saved with the saved dataset is used. Note that this + argument is required in graph mode. + compression: Optional. The algorithm to use to decompress the data when + reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`. + reader_func: Optional. A function to control how to read data from shards. + If present, the function will be traced and executed as graph computation. + + Returns: + A `tf.data.Dataset` instance. + + Raises: + FileNotFoundError: If `element_spec` is not specified and the saved nested + structure of `tf.TypeSpec` can not be located with the saved dataset. + ValueError: If `element_spec` is not specified and the method is executed + in graph mode. + """ + return dataset_ops.Dataset.load(path, element_spec, compression, reader_func) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/iterator_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/iterator_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..16166e002cb0d642aad2fdebac381962d9061711 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/iterator_ops.py @@ -0,0 +1,97 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Iterator ops.""" + +from tensorflow.python.data.ops import iterator_ops +from tensorflow.python.data.ops import options as options_lib +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +def _convert_external_state_policy_to_enum(external_state_policy): + if isinstance(external_state_policy, options_lib.ExternalStatePolicy): + return external_state_policy + if external_state_policy == "warn": + return options_lib.ExternalStatePolicy.WARN + if external_state_policy == "ignore": + return options_lib.ExternalStatePolicy.IGNORE + if external_state_policy == "fail": + return options_lib.ExternalStatePolicy.FAIL + raise ValueError( + f"Invalid `ExternalStatePolicy.` Supported values include 'warn', " + f"'ignore', and 'fail.' Received {external_state_policy}." + ) + + +@tf_export("data.experimental.make_saveable_from_iterator") +@deprecation.deprecated( + None, "`make_saveable_from_iterator` is intended for use in TF1 with " + "`tf.compat.v1.Saver`. In TF2, use `tf.train.Checkpoint` instead.") +def make_saveable_from_iterator(iterator, external_state_policy=None): + """Returns a SaveableObject for saving/restoring iterator state using Saver. + + Args: + iterator: Iterator. + external_state_policy: A string that identifies how to handle input + pipelines that depend on external state. Possible values are + 'ignore': The external state is silently ignored. + 'warn': The external state is ignored, logging a warning. + 'fail': The operation fails upon encountering external state. + By default we set it to 'fail'. + + Returns: + A SaveableObject for saving/restoring iterator state using Saver. + + Raises: + ValueError: If iterator does not support checkpointing. + ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or + 'fail'. + + For example: + + ```python + with tf.Graph().as_default(): + ds = tf.data.Dataset.range(10) + iterator = ds.make_initializable_iterator() + # Build the iterator SaveableObject. + saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator) + # Add the SaveableObject to the SAVEABLE_OBJECTS collection so + # it can be automatically saved using Saver. + tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj) + saver = tf.compat.v1.train.Saver() + + while continue_training: + ... Perform training ... + if should_save_checkpoint: + saver.save() + ``` + + Note: When restoring the iterator, the existing iterator state is completely + discarded. This means that any changes you may have made to the Dataset + graph will be discarded as well! This includes the new Dataset graph + that you may have built during validation. So, while running validation, + make sure to run the initializer for the validation input pipeline after + restoring the checkpoint. + + Note: Not all iterators support checkpointing yet. Attempting to save the + state of an unsupported iterator will throw an error. 
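+
+  In TF2, the non-deprecated way to achieve the same effect is to track the
+  iterator with `tf.train.Checkpoint` directly (a minimal sketch; the
+  checkpoint prefix below is a hypothetical path):
+
+  ```python
+  ds = tf.data.Dataset.range(10)
+  iterator = iter(ds)
+  ckpt = tf.train.Checkpoint(iterator=iterator)
+  save_path = ckpt.save("/tmp/iterator_ckpt")  # save iterator state
+  ckpt.restore(save_path)                      # restore iterator state
+  ```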
+ """ + if external_state_policy is None: + external_state_policy = "fail" + policy_enum = _convert_external_state_policy_to_enum(external_state_policy) + return iterator_ops._IteratorSaveable( # pylint: disable=protected-access + iterator._iterator_resource, # pylint: disable=protected-access + iterator._iterator_resource.name, # pylint: disable=protected-access + external_state_policy=policy_enum) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/pad_to_cardinality.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/pad_to_cardinality.py new file mode 100644 index 0000000000000000000000000000000000000000..e98ddb887331708e2531d76ed0f43543a122a66c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/pad_to_cardinality.py @@ -0,0 +1,105 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""The implementation of `tf.data.experimental.pad_to_cardinality`.""" + +from collections.abc import Mapping + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.eager import context +from tensorflow.python.ops import array_ops +from tensorflow.python.util import nest +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.pad_to_cardinality") +def pad_to_cardinality(cardinality, mask_key="valid"): + """Pads a dataset with fake elements to reach the desired cardinality. + + The dataset to pad must have a known and finite cardinality and contain + dictionary elements. The `mask_key` will be added to differentiate between + real and padding elements -- real elements will have a `=True` entry + while padding elements will have a `=False` entry. + + Example usage: + + >>> ds = tf.data.Dataset.from_tensor_slices({'a': [1, 2]}) + >>> ds = ds.apply(tf.data.experimental.pad_to_cardinality(3)) + >>> list(ds.as_numpy_iterator()) + [{'a': 1, 'valid': True}, {'a': 2, 'valid': True}, {'a': 0, 'valid': False}] + + This can be useful, e.g. during eval, when partial batches are undesirable but + it is also important not to drop any data. + + ``` + ds = ... + # Round up to the next full batch. + target_cardinality = -(-ds.cardinality() // batch_size) * batch_size + ds = ds.apply(tf.data.experimental.pad_to_cardinality(target_cardinality)) + # Set `drop_remainder` so that batch shape will be known statically. No data + # will actually be dropped since the batch size divides the cardinality. + ds = ds.batch(batch_size, drop_remainder=True) + ``` + + Args: + cardinality: The cardinality to pad the dataset to. + mask_key: The key to use for identifying real vs padding elements. + + Returns: + A dataset transformation that can be applied via `Dataset.apply()`. 
+ """ + + def make_filler_dataset(ds): + padding = cardinality - ds.cardinality() + + filler_element = nest.map_structure( + lambda spec: array_ops.zeros(spec.shape, spec.dtype), ds.element_spec + ) + filler_element[mask_key] = False + filler_dataset = dataset_ops.Dataset.from_tensors(filler_element) + filler_dataset = filler_dataset.repeat(padding) + return filler_dataset + + def apply_valid_mask(x): + x[mask_key] = True + return x + + def _apply_fn(dataset): + # The cardinality tensor is unknown during tracing, so we only check it + # in eager mode. + if context.executing_eagerly(): + if dataset.cardinality() < 0: + raise ValueError( + "The dataset passed into `pad_to_cardinality` must " + "have a known cardinalty, but has cardinality " + f"{dataset.cardinality()}" + ) + if dataset.cardinality() > cardinality: + raise ValueError( + "The dataset passed into `pad_to_cardinality` must " + "have a cardinalty less than the target cardinality " + f"({cardinality}), but has cardinality " + f"{dataset.cardinality()}" + ) + if not isinstance(dataset.element_spec, Mapping): + raise ValueError( + "`pad_to_cardinality` requires its input dataset to " + "be a dictionary." + ) + filler = make_filler_dataset(dataset) + dataset = dataset.map(apply_valid_mask) + dataset = dataset.concatenate(filler) + return dataset + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/parsing_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/parsing_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..5fd14d63bd3762c17fa7e3dcacedaf8df58e486e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/parsing_ops.py @@ -0,0 +1,161 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental `dataset` API for parsing example.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.util import structure +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.ops import parsing_ops +from tensorflow.python.ops.ragged import ragged_tensor +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +class _ParseExampleDataset(dataset_ops.UnaryDataset): + """A `Dataset` that parses `example` dataset into a `dict` dataset.""" + + def __init__(self, input_dataset, features, num_parallel_calls, + deterministic): + self._input_dataset = input_dataset + if not structure.are_compatible( + input_dataset.element_spec, + tensor_spec.TensorSpec([None], dtypes.string)): + raise TypeError("Input dataset should be a dataset of vectors of " + f"strings. 
Instead it is `{input_dataset.element_spec}`.") + self._num_parallel_calls = num_parallel_calls + if deterministic is None: + self._deterministic = "default" + elif deterministic: + self._deterministic = "true" + else: + self._deterministic = "false" + # pylint: disable=protected-access + self._features = parsing_ops._prepend_none_dimension(features) + params = parsing_ops._ParseOpParams.from_features(self._features, [ + parsing_ops.VarLenFeature, parsing_ops.SparseFeature, + parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature, + parsing_ops.RaggedFeature + ]) + # pylint: enable=protected-access + self._sparse_keys = params.sparse_keys + self._sparse_types = params.sparse_types + self._ragged_keys = params.ragged_keys + self._ragged_value_types = params.ragged_value_types + self._ragged_split_types = params.ragged_split_types + self._dense_keys = params.dense_keys + self._dense_defaults = params.dense_defaults_vec + self._dense_shapes = params.dense_shapes_as_proto + self._dense_types = params.dense_types + input_dataset_shape = dataset_ops.get_legacy_output_shapes( + self._input_dataset) + + self._element_spec = {} + + for (key, value_type) in zip(params.sparse_keys, params.sparse_types): + self._element_spec[key] = sparse_tensor.SparseTensorSpec( + input_dataset_shape.concatenate([None]), value_type) + + for (key, value_type, dense_shape) in zip(params.dense_keys, + params.dense_types, + params.dense_shapes): + self._element_spec[key] = tensor_spec.TensorSpec( + input_dataset_shape.concatenate(dense_shape), value_type) + + for (key, value_type, splits_type) in zip(params.ragged_keys, + params.ragged_value_types, + params.ragged_split_types): + self._element_spec[key] = ragged_tensor.RaggedTensorSpec( + input_dataset_shape.concatenate([None]), value_type, 1, splits_type) + + variant_tensor = ( + gen_experimental_dataset_ops.parse_example_dataset_v2( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._num_parallel_calls, + self._dense_defaults, + self._sparse_keys, + self._dense_keys, + self._sparse_types, + self._dense_shapes, + deterministic=self._deterministic, + ragged_keys=self._ragged_keys, + ragged_value_types=self._ragged_value_types, + ragged_split_types=self._ragged_split_types, + **self._flat_structure)) + super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor) + + @property + def element_spec(self): + return self._element_spec + + +@tf_export("data.experimental.parse_example_dataset") +@deprecation.deprecated( + None, "Use `tf.data.Dataset.map(tf.io.parse_example(...))` instead.") +def parse_example_dataset(features, num_parallel_calls=1, deterministic=None): + """A transformation that parses `Example` protos into a `dict` of tensors. + + Parses a number of serialized `Example` protos given in `serialized`. We refer + to `serialized` as a batch with `batch_size` many entries of individual + `Example` protos. + + This op parses serialized examples into a dictionary mapping keys to `Tensor`, + `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to + `VarLenFeature`, `RaggedFeature`, `SparseFeature`, and `FixedLenFeature` + objects. Each `VarLenFeature` and `SparseFeature` is mapped to a + `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each + `FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more + details about feature dictionaries. 
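+
+  For example (a minimal, illustrative sketch; the feature names and the
+  assumption that `dataset` yields batches of serialized `tf.train.Example`
+  strings are hypothetical):
+
+  ```python
+  features = {
+      "label": tf.io.FixedLenFeature([], tf.int64, default_value=0),
+      "terms": tf.io.VarLenFeature(tf.string),
+  }
+  # `dataset` must be a dataset of string vectors, e.g. a batched
+  # TFRecordDataset of serialized tf.train.Example protos.
+  dataset = dataset.apply(
+      tf.data.experimental.parse_example_dataset(features))
+  # Each element is now a dict: {"label": Tensor, "terms": SparseTensor}.
+  ```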
+ + Args: + features: A `dict` mapping feature keys to `FixedLenFeature`, + `VarLenFeature`, `RaggedFeature`, and `SparseFeature` values. + num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`, + representing the number of parsing processes to call in parallel. + deterministic: (Optional.) A boolean controlling whether determinism + should be traded for performance by allowing elements to be produced out + of order if some parsing calls complete faster than others. If + `deterministic` is `None`, the + `tf.data.Options.deterministic` dataset option (`True` by default) is used + to decide whether to produce elements deterministically. + + Returns: + A dataset transformation function, which can be passed to + `tf.data.Dataset.apply`. + + Raises: + ValueError: if features argument is None. + """ + if features is None: + raise ValueError("Argument `features` is required, but not specified.") + + def _apply_fn(dataset): + """Function from `Dataset` to `Dataset` that applies the transformation.""" + out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls, + deterministic) + if any( + isinstance(feature, parsing_ops.SparseFeature) or + isinstance(feature, parsing_ops.RaggedFeature) + for feature in features.values()): + # pylint: disable=protected-access + # pylint: disable=g-long-lambda + out_dataset = out_dataset.map( + lambda x: parsing_ops._construct_tensors_for_composite_features( + features, x), + num_parallel_calls=num_parallel_calls) + return out_dataset + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/prefetching_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/prefetching_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..5335de4c5a4acd6b0b5d35ab900ab215ef40c051 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/prefetching_ops.py @@ -0,0 +1,287 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Python wrapper for prefetching_ops.""" +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import iterator_ops +from tensorflow.python.data.ops import structured_function +from tensorflow.python.data.util import structure +from tensorflow.python.eager import def_function +from tensorflow.python.framework import device as framework_device +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import functional_ops +from tensorflow.python.ops import gen_dataset_ops +from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.prefetch_to_device") +def prefetch_to_device(device, buffer_size=None): + """A transformation that prefetches dataset values to the given `device`. + + NOTE: Although the transformation creates a `tf.data.Dataset`, the + transformation must be the final `Dataset` in the input pipeline. + + For example, + >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) + >>> dataset = dataset.apply(tf.data.experimental.prefetch_to_device("/cpu:0")) + >>> for element in dataset: + ... print(f'Tensor {element} is on device {element.device}') + Tensor 1 is on device /job:localhost/replica:0/task:0/device:CPU:0 + Tensor 2 is on device /job:localhost/replica:0/task:0/device:CPU:0 + Tensor 3 is on device /job:localhost/replica:0/task:0/device:CPU:0 + + Args: + device: A string. The name of a device to which elements will be prefetched. + buffer_size: (Optional.) The number of elements to buffer on `device`. + Defaults to an automatically chosen value. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + def _apply_fn(dataset): + return dataset.apply( + copy_to_device(target_device=device)).prefetch(buffer_size) + + return _apply_fn + + +@tf_export("data.experimental.copy_to_device") +def copy_to_device(target_device, source_device="/cpu:0"): + """A transformation that copies dataset elements to the given `target_device`. + + Args: + target_device: The name of a device to which elements will be copied. + source_device: The original device on which `input_dataset` will be placed. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return _CopyToDeviceDataset( + dataset, target_device=target_device, source_device=source_device) + + return _apply_fn + + +# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate +# all inputs to the Op are in host memory, thereby avoiding some unnecessary +# Sends and Recvs. +class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that copies elements to another device.""" + + def __init__(self, input_dataset, target_device, source_device="/cpu:0"): + """Constructs a _CopyToDeviceDataset. + + Args: + input_dataset: `Dataset` to be copied + target_device: The name of the device to which elements would be copied. + source_device: Device where input_dataset would be placed. 
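+
+    For example, this dataset is normally constructed via the public wrapper
+    (a minimal sketch; the device string is illustrative):
+
+    ```python
+    dataset = tf.data.Dataset.range(10)
+    dataset = dataset.apply(
+        tf.data.experimental.copy_to_device("/gpu:0"))
+    ```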
+ """ + self._input_dataset = input_dataset._apply_debug_options() # pylint: disable=protected-access + self._target_device = target_device + spec = framework_device.DeviceSpec().from_string(self._target_device) + self._is_gpu_target = (spec.device_type == "GPU") + self._source_device_string = source_device + self._source_device = ops.convert_to_tensor(source_device) + + wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant( + self._input_dataset._variant_tensor) # pylint: disable=protected-access + + @def_function.function() + def _init_func(): + """Creates an iterator for the input dataset. + + Returns: + A `string` tensor that encapsulates the iterator created. + """ + ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant) + resource = gen_dataset_ops.anonymous_iterator( + **self._input_dataset._flat_structure) # pylint: disable=protected-access + with ops.control_dependencies( + [gen_dataset_ops.make_iterator(ds_variant, resource)]): + return gen_dataset_ops.iterator_to_string_handle(resource) + + init_func_concrete = _init_func.get_concrete_function() # pylint: disable=protected-access + + @def_function.function() + def _remote_init_func(): + return functional_ops.remote_call( + target=self._source_device, + args=init_func_concrete.captured_inputs, + Tout=[dtypes.string], + f=init_func_concrete) + + self._init_func = _remote_init_func.get_concrete_function() # pylint: disable=protected-access + self._init_captured_args = self._init_func.captured_inputs + + @def_function.function( + input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) + def _next_func(string_handle): + """Calls get_next for created iterator. + + Args: + string_handle: An iterator string handle created by _init_func + Returns: + The elements generated from `input_dataset` + """ + with ops.device(self._source_device_string): + iterator = iterator_ops.Iterator.from_string_handle( + string_handle, + dataset_ops.get_legacy_output_types(self), + dataset_ops.get_legacy_output_shapes(self), + dataset_ops.get_legacy_output_classes(self)) + return structure.to_tensor_list(self.element_spec, iterator.get_next()) + + next_func_concrete = _next_func.get_concrete_function() # pylint: disable=protected-access + + @def_function.function( + input_signature=[tensor_spec.TensorSpec([], dtypes.string)], + experimental_attributes={"experimental_ints_on_device": True}) + def _remote_next_func(string_handle): + return functional_ops.remote_call( + target=self._source_device, + args=[string_handle] + next_func_concrete.captured_inputs, + Tout=self._input_dataset._flat_types, # pylint: disable=protected-access + f=next_func_concrete) + + self._next_func = _remote_next_func.get_concrete_function() + self._next_captured_args = self._next_func.captured_inputs + + @def_function.function( + input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) + def _finalize_func(string_handle): + """Destroys the iterator resource created. 
+ + Args: + string_handle: An iterator string handle created by _init_func + Returns: + Tensor constant 0 + """ + iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2( + string_handle, + **self._input_dataset._flat_structure) # pylint: disable=protected-access + with ops.control_dependencies([ + resource_variable_ops.destroy_resource_op( + iterator_resource, ignore_lookup_error=True)]): + return array_ops.constant(0, dtypes.int64) + + finalize_func_concrete = _finalize_func.get_concrete_function() # pylint: disable=protected-access + + @def_function.function( + input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) + def _remote_finalize_func(string_handle): + return functional_ops.remote_call( + target=self._source_device, + args=[string_handle] + finalize_func_concrete.captured_inputs, + Tout=[dtypes.int64], + f=finalize_func_concrete) + + self._finalize_func = _remote_finalize_func.get_concrete_function( # pylint: disable=protected-access + ) + self._finalize_captured_args = self._finalize_func.captured_inputs + + g = ops.get_default_graph() + self._init_func.add_to_graph(g) + self._next_func.add_to_graph(g) + self._finalize_func.add_to_graph(g) + # pylint: enable=protected-access + + with ops.device(self._target_device): + variant_tensor = gen_dataset_ops.generator_dataset( + self._init_captured_args, + self._next_captured_args, + self._finalize_captured_args, + init_func=self._init_func, + next_func=self._next_func, + finalize_func=self._finalize_func, + **self._input_dataset._flat_structure) # pylint: disable=protected-access + super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor) + + # The one_shot_iterator implementation needs a 0 arg _make_dataset function + # that thereby captures all the inputs required to create the dataset. Since + # there are strings that are inputs to the GeneratorDataset which can't be + # placed on a GPU, this fails for the GPU case. Therefore, disabling it for + # GPU. + def make_one_shot_iterator(self): + if self._is_gpu_target: + raise ValueError( + "`make_one_shot_iterator` is not compatible with GPU execution. " + "Please use `Dataset.make_initializable_iterator()` instead." + ) + else: + return super(_CopyToDeviceDataset, self).make_one_shot_iterator() + + +class _MapOnGpuDataset(dataset_ops.UnaryDataset): + """A `Dataset` that maps a function over elements in its input using a GPU.""" + + def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True): + """See `Dataset.map()` for details.""" + self._input_dataset = input_dataset + self._use_inter_op_parallelism = use_inter_op_parallelism + + self._map_func = structured_function.StructuredFunctionWrapper( + map_func, + self._transformation_name(), + dataset=input_dataset, + defun_kwargs={"experimental_ints_on_device": True}) + variant_tensor = ged_ops.experimental_map_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._map_func.function.captured_inputs, + f=self._map_func.function, + use_inter_op_parallelism=self._use_inter_op_parallelism, + **self._flat_structure) + super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor) + + def _functions(self): + return [self._map_func] + + @property + def element_spec(self): + return self._map_func.output_structure + + def _transformation_name(self): + return "map_on_gpu()" + + +def map_on_gpu(map_func): + """Maps `map_func` across the elements of this dataset. + + NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs + `map_func` on GPU.
It must be used after applying the + `tf.data.experimental.copy_to_device` transformation with a GPU device + argument. + + Args: + map_func: A function mapping a nested structure of tensors (having shapes + and types defined by `self.output_shapes` and `self.output_types`) to + another nested structure of tensors. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + + def _apply_fn(dataset): + return _MapOnGpuDataset(dataset, map_func) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_access.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_access.py new file mode 100644 index 0000000000000000000000000000000000000000..f1a14b0f9b9d0e47b164bcfd6c1e11c3b710d95b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_access.py @@ -0,0 +1,73 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python API for random indexing into a dataset.""" + +from tensorflow.python.data.util import structure +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("data.experimental.at", v1=[]) +def at(dataset, index): + """Returns the element at a specific index in a dataset. + + Currently, random access is supported for the following tf.data operations: + + - `tf.data.Dataset.from_tensor_slices`, + - `tf.data.Dataset.from_tensors`, + - `tf.data.Dataset.shuffle`, + - `tf.data.Dataset.batch`, + - `tf.data.Dataset.shard`, + - `tf.data.Dataset.map`, + - `tf.data.Dataset.range`, + - `tf.data.Dataset.zip`, + - `tf.data.Dataset.skip`, + - `tf.data.Dataset.repeat`, + - `tf.data.Dataset.list_files`, + - `tf.data.Dataset.SSTableDataset`, + - `tf.data.Dataset.concatenate`, + - `tf.data.Dataset.enumerate`, + - `tf.data.Dataset.parallel_map`, + - `tf.data.Dataset.prefetch`, + - `tf.data.Dataset.take`, + - `tf.data.Dataset.cache` (in-memory only) + + Users can use the cache operation to enable random access for any dataset, + even one comprised of transformations which are not on this list. + E.g., to get the third element of a TFDS dataset: + + ```python + ds = tfds.load("mnist", split="train").cache() + elem = tf.data.experimental.at(ds, 3) + ``` + + Args: + dataset: The `tf.data.Dataset` from which to fetch the element. + index: The index at which to fetch the element. + + Returns: + A (nested) structure of values matching `tf.data.Dataset.element_spec`. + + Raises: + UnimplementedError: If random access is not yet supported for a dataset.
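+
+  A self-contained sketch (the printed value assumes eager execution):
+
+  ```python
+  ds = tf.data.Dataset.range(10).map(lambda x: x * 2)
+  print(tf.data.experimental.at(ds, 3).numpy())  # 6
+  ```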
+ """ + # pylint: disable=protected-access + return structure.from_tensor_list( + dataset.element_spec, + gen_experimental_dataset_ops.get_element_at_index( + dataset._variant_tensor, + index, + output_types=structure.get_flat_tensor_types(dataset.element_spec), + output_shapes=structure.get_flat_tensor_shapes(dataset.element_spec))) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a88f14a8063b429eb907cd0335d866f281543624 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_ops.py @@ -0,0 +1,58 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Datasets for random number generators.""" +import functools + +from tensorflow.python import tf2 +from tensorflow.python.compat import v2_compat +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import random_op +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +# TODO(b/260143413): Migrate users to `tf.data.Dataset.random`. +@deprecation.deprecated(None, "Use `tf.data.Dataset.random(...)`.") +@tf_export("data.experimental.RandomDataset", v1=[]) +class RandomDatasetV2(random_op._RandomDataset): # pylint: disable=protected-access + """A `Dataset` of pseudorandom values.""" + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.random(...)`.") +@tf_export(v1=["data.experimental.RandomDataset"]) +class RandomDatasetV1(dataset_ops.DatasetV1Adapter): + """A `Dataset` of pseudorandom values.""" + + @functools.wraps(RandomDatasetV2.__init__) + def __init__(self, seed=None): + wrapped = RandomDatasetV2(seed) + super(RandomDatasetV1, self).__init__(wrapped) + + +if tf2.enabled(): + RandomDataset = RandomDatasetV2 +else: + RandomDataset = RandomDatasetV1 + + +def _tf2_callback(): + global RandomDataset + if tf2.enabled(): + RandomDataset = RandomDatasetV2 + else: + RandomDataset = RandomDatasetV1 + + +v2_compat.register_data_v2_callback(_tf2_callback) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/readers.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/readers.py new file mode 100644 index 0000000000000000000000000000000000000000..7ffcbeb4d499eaf246d919ef5fdf2bd616a88b4b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/readers.py @@ -0,0 +1,1238 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Python wrappers for reader Datasets.""" +import collections +import csv +import functools +import gzip + +import numpy as np + +from tensorflow.python import tf2 +from tensorflow.python.compat import v2_compat +from tensorflow.python.data.experimental.ops import error_ops +from tensorflow.python.data.experimental.ops import parsing_ops +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.data.ops import map_op +from tensorflow.python.data.ops import options as options_lib +from tensorflow.python.data.ops import readers as core_readers +from tensorflow.python.data.util import convert +from tensorflow.python.data.util import nest +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_spec +from tensorflow.python.framework import tensor_util +from tensorflow.python.lib.io import file_io +from tensorflow.python.ops import gen_experimental_dataset_ops +from tensorflow.python.ops import io_ops +from tensorflow.python.platform import gfile +from tensorflow.python.util.tf_export import tf_export + +_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32, + dtypes.int64, dtypes.string) + + +def _is_valid_int32(str_val): + try: + # Checks equality to prevent int32 overflow + return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype( + str_val) + except (ValueError, OverflowError): + return False + + +def _is_valid_int64(str_val): + try: + dtypes.int64.as_numpy_dtype(str_val) + return True + except (ValueError, OverflowError): + return False + + +def _is_valid_float(str_val, float_dtype): + try: + return float_dtype.as_numpy_dtype(str_val) < np.inf + except ValueError: + return False + + +def _infer_type(str_val, na_value, prev_type): + """Given a string, infers its tensor type. + + Infers the type of a value by picking the least 'permissive' type possible, + while still allowing the previous type inference for this column to be valid. + + Args: + str_val: String value to infer the type of. + na_value: Additional string to recognize as a NA/NaN CSV value. + prev_type: Type previously inferred based on values of this column that + we've seen up till now. + Returns: + Inferred dtype. 
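+
+  For example (illustrative):
+
+  ```
+  _infer_type("7", "", dtypes.int32)      # -> dtypes.int32
+  _infer_type("7.5", "", dtypes.int32)    # -> dtypes.float32 (widens)
+  _infer_type("abc", "", dtypes.float32)  # -> dtypes.string (falls back)
+  _infer_type("", "", dtypes.int64)       # -> dtypes.int64 (null keeps prev)
+  ```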
+ """ + if str_val in ("", na_value): + # If the field is null, it gives no extra information about its type + return prev_type + + type_list = [ + dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string + ] # list of types to try, ordered from least permissive to most + + type_functions = [ + _is_valid_int32, + _is_valid_int64, + lambda str_val: _is_valid_float(str_val, dtypes.float32), + lambda str_val: _is_valid_float(str_val, dtypes.float64), + lambda str_val: True, + ] # Corresponding list of validation functions + + for i in range(len(type_list)): + validation_fn = type_functions[i] + if validation_fn(str_val) and (prev_type is None or + prev_type in type_list[:i + 1]): + return type_list[i] + + +def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header, + file_io_fn): + """Generator that yields rows of CSV file(s) in order.""" + for fn in filenames: + with file_io_fn(fn) as f: + rdr = csv.reader( + f, + delimiter=field_delim, + quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE) + row_num = 1 + if header: + next(rdr) # Skip header lines + row_num += 1 + + for csv_row in rdr: + if len(csv_row) != num_cols: + raise ValueError( + f"Problem inferring types: CSV row {row_num} has {len(csv_row)} " + f"number of fields. Expected: {num_cols}.") + row_num += 1 + yield csv_row + + +def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim, + na_value, header, num_rows_for_inference, + select_columns, file_io_fn): + """Infers column types from the first N valid CSV records of files.""" + if select_columns is None: + select_columns = range(num_cols) + inferred_types = [None] * len(select_columns) + + for i, csv_row in enumerate( + _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header, + file_io_fn)): + if num_rows_for_inference is not None and i >= num_rows_for_inference: + break + + for j, col_index in enumerate(select_columns): + inferred_types[j] = _infer_type(csv_row[col_index], na_value, + inferred_types[j]) + + # Replace None's with a default type + inferred_types = [t or dtypes.string for t in inferred_types] + # Default to 0 or '' for null values + return [ + constant_op.constant([0 if t is not dtypes.string else ""], dtype=t) + for t in inferred_types + ] + + +def _infer_column_names(filenames, field_delim, use_quote_delim, file_io_fn): + """Infers column names from first rows of files.""" + csv_kwargs = { + "delimiter": field_delim, + "quoting": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE + } + with file_io_fn(filenames[0]) as f: + try: + column_names = next(csv.reader(f, **csv_kwargs)) + except StopIteration: + raise ValueError("Failed when reading the header line of " + f"{filenames[0]}. Is it an empty file?") + + for name in filenames[1:]: + with file_io_fn(name) as f: + try: + if next(csv.reader(f, **csv_kwargs)) != column_names: + raise ValueError( + "All input CSV files should have the same column names in the " + f"header row. File {name} has different column names.") + except StopIteration: + raise ValueError("Failed when reading the header line of " + f"{name}. Is it an empty file?") + return column_names + + +def _get_sorted_col_indices(select_columns, column_names): + """Transforms select_columns argument into sorted column indices.""" + names_to_indices = {n: i for i, n in enumerate(column_names)} + num_cols = len(column_names) + + results = [] + for v in select_columns: + # If value is already an int, check if it's valid. 
+ if isinstance(v, int): + if v < 0 or v >= num_cols: + raise ValueError( + f"Column index {v} specified in `select_columns` should be >= 0 " + f"and < {num_cols}, which is the number of columns.") + results.append(v) + # Otherwise, check that it's a valid column name and convert it to + # the relevant column index. + elif v not in names_to_indices: + raise ValueError( + f"Column {v} specified in `select_columns` must be one of the " + f"columns: {names_to_indices.keys()}.") + else: + results.append(names_to_indices[v]) + + # Sort and dedupe, keeping the original list around so that any + # duplicates can be reported accurately. + deduped = sorted(set(results)) + if len(deduped) != len(results): + sorted_names = sorted(results) + duplicate_columns = set([a for a, b in zip( + sorted_names[:-1], sorted_names[1:]) if a == b]) + raise ValueError("The `select_columns` argument contains duplicate " + f"columns: {duplicate_columns}.") + return deduped + + +def _maybe_shuffle_and_repeat( + dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed): + """Optionally shuffle and repeat dataset, as requested.""" + if shuffle: + dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed) + if num_epochs != 1: + dataset = dataset.repeat(num_epochs) + return dataset + + +def make_tf_record_dataset(file_pattern, + batch_size, + parser_fn=None, + num_epochs=None, + shuffle=True, + shuffle_buffer_size=None, + shuffle_seed=None, + prefetch_buffer_size=None, + num_parallel_reads=None, + num_parallel_parser_calls=None, + drop_final_batch=False): + """Reads and optionally parses TFRecord files into a dataset. + + Provides common functionality such as batching, optional parsing, shuffling, + and performant defaults. + + Args: + file_pattern: List of files or patterns of TFRecord file paths. + See `tf.io.gfile.glob` for pattern rules. + batch_size: An int representing the number of records to combine + in a single batch. + parser_fn: (Optional.) A function accepting string input to parse + and process the record contents. This function must map records + to components of a fixed shape, so they may be batched. By + default, uses the record contents unmodified. + num_epochs: (Optional.) An int specifying the number of times this + dataset is repeated. If None (the default), cycles through the + dataset forever. + shuffle: (Optional.) A bool that indicates whether the input + should be shuffled. Defaults to `True`. + shuffle_buffer_size: (Optional.) Buffer size to use for + shuffling. A large buffer size ensures better shuffling, but + increases memory usage and startup time. + shuffle_seed: (Optional.) Randomization seed to use for shuffling. + prefetch_buffer_size: (Optional.) An int specifying the number of + feature batches to prefetch for performance improvement. + Defaults to auto-tune. Set to 0 to disable prefetching. + num_parallel_reads: (Optional.) Number of threads used to read + records from files. If set to a value greater than one, the + results will be interleaved. Defaults to `24`. + num_parallel_parser_calls: (Optional.) Number of records to + parse in parallel. Defaults to `batch_size`. + drop_final_batch: (Optional.) Whether the last batch should be + dropped in case its size is smaller than `batch_size`; the + default behavior is not to drop the smaller batch. + + Returns: + A dataset, where each element matches the output of `parser_fn` + except it will have an additional leading `batch_size` dimension, + or a `batch_size`-length 1-D tensor of strings if `parser_fn` is + unspecified.
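+
+  For example (a minimal sketch; the file pattern and feature name are
+  illustrative, and since this helper is not `tf_export`ed here it is shown
+  called as a module-level function):
+
+  ```python
+  def parse(record):
+    # `parser_fn` is applied per serialized record, before batching.
+    return tf.io.parse_single_example(
+        record, {"x": tf.io.FixedLenFeature([], tf.float32)})
+
+  dataset = make_tf_record_dataset(
+      "/path/to/data-*.tfrecord", batch_size=32, parser_fn=parse,
+      num_epochs=1)
+  ```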
+ """ + if num_parallel_reads is None: + # NOTE: We considered auto-tuning this value, but there is a concern + # that this affects the mixing of records from different files, which + # could affect training convergence/accuracy, so we are defaulting to + # a constant for now. + num_parallel_reads = 24 + + if num_parallel_parser_calls is None: + # TODO(josh11b): if num_parallel_parser_calls is None, use some function + # of num cores instead of `batch_size`. + num_parallel_parser_calls = batch_size + + if prefetch_buffer_size is None: + prefetch_buffer_size = dataset_ops.AUTOTUNE + + files = dataset_ops.Dataset.list_files( + file_pattern, shuffle=shuffle, seed=shuffle_seed) + + dataset = core_readers.TFRecordDataset( + files, num_parallel_reads=num_parallel_reads) + + if shuffle_buffer_size is None: + # TODO(josh11b): Auto-tune this value when not specified + shuffle_buffer_size = 10000 + dataset = _maybe_shuffle_and_repeat( + dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed) + + # NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to + # improve the shape inference, because it makes the batch dimension static. + # It is safe to do this because in that case we are repeating the input + # indefinitely, and all batches will be full-sized. + drop_final_batch = drop_final_batch or num_epochs is None + + if parser_fn is None: + dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch) + else: + dataset = dataset.map( + parser_fn, num_parallel_calls=num_parallel_parser_calls) + dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch) + + if prefetch_buffer_size == 0: + return dataset + else: + return dataset.prefetch(buffer_size=prefetch_buffer_size) + + +@tf_export("data.experimental.make_csv_dataset", v1=[]) +def make_csv_dataset_v2( + file_pattern, + batch_size, + column_names=None, + column_defaults=None, + label_name=None, + select_columns=None, + field_delim=",", + use_quote_delim=True, + na_value="", + header=True, + num_epochs=None, # TODO(aaudibert): Change default to 1 when graduating. + shuffle=True, + shuffle_buffer_size=10000, + shuffle_seed=None, + prefetch_buffer_size=None, + num_parallel_reads=None, + sloppy=False, + num_rows_for_inference=100, + compression_type=None, + ignore_errors=False, + encoding="utf-8", +): + """Reads CSV files into a dataset. + + Reads CSV files into a dataset, where each element of the dataset is a + (features, labels) tuple that corresponds to a batch of CSV rows. The features + dictionary maps feature column names to `Tensor`s containing the corresponding + feature data, and labels is a `Tensor` containing the batch's label data. + + By default, the first rows of the CSV files are expected to be headers listing + the column names. If the first rows are not headers, set `header=False` and + provide the column names with the `column_names` argument. + + By default, the dataset is repeated indefinitely, reshuffling the order each + time. This behavior can be modified by setting the `num_epochs` and `shuffle` + arguments. 
+ + For example, suppose you have a CSV file containing + + | Feature_A | Feature_B | + | --------- | --------- | + | 1 | "a" | + | 2 | "b" | + | 3 | "c" | + | 4 | "d" | + + ``` + # No label column specified + dataset = tf.data.experimental.make_csv_dataset(filename, batch_size=2) + iterator = dataset.as_numpy_iterator() + print(dict(next(iterator))) + # prints a dictionary of batched features: + # OrderedDict([('Feature_A', array([1, 4], dtype=int32)), + # ('Feature_B', array([b'a', b'd'], dtype=object))]) + ``` + + ``` + # Set Feature_B as label column + dataset = tf.data.experimental.make_csv_dataset( + filename, batch_size=2, label_name="Feature_B") + iterator = dataset.as_numpy_iterator() + print(next(iterator)) + # prints (features, labels) tuple: + # (OrderedDict([('Feature_A', array([1, 2], dtype=int32))]), + # array([b'a', b'b'], dtype=object)) + ``` + + See the + [Load CSV data guide](https://www.tensorflow.org/tutorials/load_data/csv) for + more examples of using `make_csv_dataset` to read CSV data. + + Args: + file_pattern: List of files or patterns of file paths containing CSV + records. See `tf.io.gfile.glob` for pattern rules. + batch_size: An int representing the number of records to combine + in a single batch. + column_names: An optional list of strings that corresponds to the CSV + columns, in order. One per column of the input record. If this is not + provided, infers the column names from the first row of the records. + These names will be the keys of the features dict of each dataset element. + column_defaults: An optional list of default values for the CSV fields. One + item per selected column of the input record. Each item in the list is + either a valid CSV dtype (float32, float64, int32, int64, or string), or a + `Tensor` with one of the aforementioned types. The tensor can either be + a scalar default value (if the column is optional), or an empty tensor (if + the column is required). If a dtype is provided instead of a tensor, the + column is also treated as required. If this list is not provided, tries + to infer types based on reading the first num_rows_for_inference rows of + files specified, and assumes all columns are optional, defaulting to `0` + for numeric values and `""` for string values. If both this and + `select_columns` are specified, these must have the same lengths, and + `column_defaults` is assumed to be sorted in order of increasing column + index. + label_name: An optional string corresponding to the label column. If + provided, the data for this column is returned as a separate `Tensor` from + the features dictionary. + select_columns: An optional list of integer indices or string column + names that specifies a subset of columns of CSV data to select. If + column names are provided, these must correspond to names provided in + `column_names` or inferred from the file header lines. When this argument + is specified, only a subset of CSV columns will be parsed and returned, + corresponding to the columns specified. Using this results in faster + parsing and lower memory usage. If both this and `column_defaults` are + specified, these must have the same lengths, and `column_defaults` is + assumed to be sorted in order of increasing column index. + field_delim: An optional `string`. Defaults to `","`. Char delimiter to + separate fields in a record. + use_quote_delim: An optional bool. Defaults to `True`. If false, treats + double quotation marks as regular characters inside of the string fields.
+ na_value: Additional string to recognize as NA/NaN. + header: A bool that indicates whether the first rows of provided CSV files + correspond to header lines with column names, and should not be included + in the data. + num_epochs: An int specifying the number of times this dataset is repeated. + If None, cycles through the dataset forever. + shuffle: A bool that indicates whether the input should be shuffled. + shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size + ensures better shuffling, but increases memory usage and startup time. + shuffle_seed: Randomization seed to use for shuffling. + prefetch_buffer_size: An int specifying the number of feature + batches to prefetch for performance improvement. Recommended value is the + number of batches consumed per training step. Defaults to auto-tune. + num_parallel_reads: Number of threads used to read CSV records from files. + If >1, the results will be interleaved. Defaults to `1`. + sloppy: If `True`, reading performance will be improved at + the cost of non-deterministic ordering. If `False`, the order of elements + produced is deterministic prior to shuffling (elements are still + randomized if `shuffle=True`. Note that if the seed is set, then order + of elements after shuffling is deterministic). Defaults to `False`. + num_rows_for_inference: Number of rows of a file to use for type inference + if record_defaults is not provided. If None, reads all the rows of all + the files. Defaults to 100. + compression_type: (Optional.) A `tf.string` scalar evaluating to one of + `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression. + ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing, + such as malformed data or empty lines, and moves on to the next valid + CSV record. Otherwise, the dataset raises an error and stops processing + when encountering any invalid records. Defaults to `False`. + encoding: Encoding to use when reading. Defaults to `UTF-8`. + + Returns: + A dataset, where each element is a (features, labels) tuple that corresponds + to a batch of `batch_size` CSV rows. The features dictionary maps feature + column names to `Tensor`s containing the corresponding column data, and + labels is a `Tensor` containing the column data for the label column + specified by `label_name`. + + Raises: + ValueError: If any of the arguments is malformed. + """ + if num_parallel_reads is None: + num_parallel_reads = 1 + + if prefetch_buffer_size is None: + prefetch_buffer_size = dataset_ops.AUTOTUNE + + # Create dataset of all matching filenames + filenames = _get_file_names(file_pattern, False) + dataset = dataset_ops.Dataset.from_tensor_slices(filenames) + if shuffle: + dataset = dataset.shuffle(len(filenames), shuffle_seed) + + # Clean arguments; figure out column names and defaults + if column_names is None or column_defaults is None: + # Find out which io function to open the file + file_io_fn = lambda filename: file_io.FileIO( # pylint: disable=g-long-lambda + filename, "r", encoding=encoding) + if compression_type is not None: + compression_type_value = tensor_util.constant_value(compression_type) + if compression_type_value is None: + raise ValueError( + f"Received unknown `compression_type` {compression_type}. 
" + "Expected: GZIP, ZLIB or "" (empty string).") + if compression_type_value == "GZIP": + file_io_fn = lambda filename: gzip.open( # pylint: disable=g-long-lambda + filename, "rt", encoding=encoding) + elif compression_type_value == "ZLIB": + raise ValueError( + f"`compression_type` {compression_type} is not supported for " + "probing columns.") + elif compression_type_value != "": + raise ValueError( + f"Received unknown `compression_type` {compression_type}. " + "Expected: GZIP, ZLIB or " + " (empty string).") + if column_names is None: + if not header: + raise ValueError("Expected `column_names` or `header` arguments. Neither " + "is provided.") + # If column names are not provided, infer from the header lines + column_names = _infer_column_names(filenames, field_delim, use_quote_delim, + file_io_fn) + if len(column_names) != len(set(column_names)): + sorted_names = sorted(column_names) + duplicate_columns = set([a for a, b in zip( + sorted_names[:-1], sorted_names[1:]) if a == b]) + raise ValueError( + "Either `column_names` argument or CSV header row contains duplicate " + f"column names: {duplicate_columns}.") + + if select_columns is not None: + select_columns = _get_sorted_col_indices(select_columns, column_names) + + if column_defaults is not None: + column_defaults = [ + constant_op.constant([], dtype=x) + if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x + for x in column_defaults + ] + else: + # If column defaults are not provided, infer from records at graph + # construction time + column_defaults = _infer_column_defaults(filenames, len(column_names), + field_delim, use_quote_delim, + na_value, header, + num_rows_for_inference, + select_columns, file_io_fn) + + if select_columns is not None and len(column_defaults) != len(select_columns): + raise ValueError( + "If specified, `column_defaults` and `select_columns` must have the " + f"same length: `column_defaults` has length {len(column_defaults)}, " + f"`select_columns` has length {len(select_columns)}.") + if select_columns is not None and len(column_names) > len(select_columns): + # Pick the relevant subset of column names + column_names = [column_names[i] for i in select_columns] + + if label_name is not None and label_name not in column_names: + raise ValueError("`label_name` provided must be one of the columns: " + f"{column_names}. Received: {label_name}.") + + def filename_to_dataset(filename): + dataset = CsvDataset( + filename, + record_defaults=column_defaults, + field_delim=field_delim, + use_quote_delim=use_quote_delim, + na_value=na_value, + select_cols=select_columns, + header=header, + compression_type=compression_type + ) + if ignore_errors: + dataset = dataset.apply(error_ops.ignore_errors()) + return dataset + + def map_fn(*columns): + """Organizes columns into a features dictionary. + + Args: + *columns: list of `Tensor`s corresponding to one csv record. + Returns: + An OrderedDict of feature names to values for that particular record. If + label_name is provided, extracts the label feature to be returned as the + second element of the tuple. 
+ """ + features = collections.OrderedDict(zip(column_names, columns)) + if label_name is not None: + label = features.pop(label_name) + return features, label + return features + + if num_parallel_reads == dataset_ops.AUTOTUNE: + dataset = dataset.interleave( + filename_to_dataset, num_parallel_calls=num_parallel_reads) + options = options_lib.Options() + options.deterministic = not sloppy + dataset = dataset.with_options(options) + else: + # Read files sequentially (if num_parallel_reads=1) or in parallel + def apply_fn(dataset): + return core_readers.ParallelInterleaveDataset( + dataset, + filename_to_dataset, + cycle_length=num_parallel_reads, + block_length=1, + sloppy=sloppy, + buffer_output_elements=None, + prefetch_input_elements=None) + + dataset = dataset.apply(apply_fn) + + dataset = _maybe_shuffle_and_repeat( + dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed) + + # Apply batch before map for perf, because map has high overhead relative + # to the size of the computation in each map. + # NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to + # improve the shape inference, because it makes the batch dimension static. + # It is safe to do this because in that case we are repeating the input + # indefinitely, and all batches will be full-sized. + dataset = dataset.batch(batch_size=batch_size, + drop_remainder=num_epochs is None) + dataset = map_op._MapDataset( # pylint: disable=protected-access + dataset, map_fn, use_inter_op_parallelism=False) + dataset = dataset.prefetch(prefetch_buffer_size) + + return dataset + + +@tf_export(v1=["data.experimental.make_csv_dataset"]) +def make_csv_dataset_v1( + file_pattern, + batch_size, + column_names=None, + column_defaults=None, + label_name=None, + select_columns=None, + field_delim=",", + use_quote_delim=True, + na_value="", + header=True, + num_epochs=None, + shuffle=True, + shuffle_buffer_size=10000, + shuffle_seed=None, + prefetch_buffer_size=None, + num_parallel_reads=None, + sloppy=False, + num_rows_for_inference=100, + compression_type=None, + ignore_errors=False, + encoding="utf-8", +): # pylint: disable=missing-docstring + return dataset_ops.DatasetV1Adapter( + make_csv_dataset_v2(file_pattern, batch_size, column_names, + column_defaults, label_name, select_columns, + field_delim, use_quote_delim, na_value, header, + num_epochs, shuffle, shuffle_buffer_size, + shuffle_seed, prefetch_buffer_size, + num_parallel_reads, sloppy, num_rows_for_inference, + compression_type, ignore_errors, encoding)) +make_csv_dataset_v1.__doc__ = make_csv_dataset_v2.__doc__ + + +_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024 # 4 MB + + +@tf_export("data.experimental.CsvDataset", v1=[]) +class CsvDatasetV2(dataset_ops.DatasetSource): + r"""A Dataset comprising lines from one or more CSV files. + + The `tf.data.experimental.CsvDataset` class provides a minimal CSV Dataset + interface. There is also a richer `tf.data.experimental.make_csv_dataset` + function which provides additional convenience features such as column header + parsing, column type-inference, automatic shuffling, and file interleaving. + + The elements of this dataset correspond to records from the file(s). + RFC 4180 format is expected for CSV files + (https://tools.ietf.org/html/rfc4180) + Note that we allow leading and trailing spaces for int or float fields. + + For example, suppose we have a file 'my_file0.csv' with four CSV columns of + different data types: + + >>> with open('/tmp/my_file0.csv', 'w') as f: + ... 
f.write('abcdefg,4.28E10,5.55E6,12\n') + ... f.write('hijklmn,-5.3E14,,2\n') + + We can construct a CsvDataset from it as follows: + + >>> dataset = tf.data.experimental.CsvDataset( + ... "/tmp/my_file0.csv", + ... [tf.float32, # Required field, use dtype or empty tensor + ... tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0 + ... tf.int32, # Required field, use dtype or empty tensor + ... ], + ... select_cols=[1,2,3] # Only parse last three columns + ... ) + + The expected output of its iterations is: + + >>> for element in dataset.as_numpy_iterator(): + ... print(element) + (4.28e10, 5.55e6, 12) + (-5.3e14, 0.0, 2) + + See + https://www.tensorflow.org/tutorials/load_data/csv#tfdataexperimentalcsvdataset + for more in-depth example usage. + """ + + def __init__(self, + filenames, + record_defaults, + compression_type=None, + buffer_size=None, + header=False, + field_delim=",", + use_quote_delim=True, + na_value="", + select_cols=None, + exclude_cols=None): + """Creates a `CsvDataset` by reading and decoding CSV files. + + Args: + filenames: A `tf.string` tensor containing one or more filenames. + record_defaults: A list of default values for the CSV fields. Each item in + the list is either a valid CSV `DType` (float32, float64, int32, int64, + string), or a `Tensor` object with one of the above types. One per + column of CSV data, with either a scalar `Tensor` default value for the + column if it is optional, or `DType` or empty `Tensor` if required. If + both this and `select_columns` are specified, these must have the same + lengths, and `column_defaults` is assumed to be sorted in order of + increasing column index. If both this and 'exclude_cols' are specified, + the sum of lengths of record_defaults and exclude_cols should equal + the total number of columns in the CSV file. + compression_type: (Optional.) A `tf.string` scalar evaluating to one of + `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no + compression. + buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes + to buffer while reading files. Defaults to 4MB. + header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s) + have header line(s) that should be skipped when parsing. Defaults to + `False`. + field_delim: (Optional.) A `tf.string` scalar containing the delimiter + character that separates fields in a record. Defaults to `","`. + use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats + double quotation marks as regular characters inside of string fields + (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`. + na_value: (Optional.) A `tf.string` scalar indicating a value that will + be treated as NA/NaN. + select_cols: (Optional.) A sorted list of column indices to select from + the input data. If specified, only this subset of columns will be + parsed. Defaults to parsing all columns. At most one of `select_cols` + and `exclude_cols` can be specified. + exclude_cols: (Optional.) A sorted list of column indices to exclude from + the input data. If specified, only the complement of this set of column + will be parsed. Defaults to parsing all columns. At most one of + `select_cols` and `exclude_cols` can be specified. 
+
+ Raises:
+ InvalidArgumentError: If `exclude_cols` is not None and
+ len(exclude_cols) + len(record_defaults) does not match the total
+ number of columns in the file(s).
+ """
+ self._filenames = ops.convert_to_tensor(
+ filenames, dtype=dtypes.string, name="filenames")
+ self._compression_type = convert.optional_param_to_tensor(
+ "compression_type",
+ compression_type,
+ argument_default="",
+ argument_dtype=dtypes.string)
+ record_defaults = [
+ constant_op.constant([], dtype=x)
+ if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x
+ for x in record_defaults
+ ]
+ self._record_defaults = ops.convert_n_to_tensor(
+ record_defaults, name="record_defaults")
+ self._buffer_size = convert.optional_param_to_tensor(
+ "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
+ self._header = ops.convert_to_tensor(
+ header, dtype=dtypes.bool, name="header")
+ self._field_delim = ops.convert_to_tensor(
+ field_delim, dtype=dtypes.string, name="field_delim")
+ self._use_quote_delim = ops.convert_to_tensor(
+ use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
+ self._na_value = ops.convert_to_tensor(
+ na_value, dtype=dtypes.string, name="na_value")
+ self._select_cols = convert.optional_param_to_tensor(
+ "select_cols",
+ select_cols,
+ argument_default=[],
+ argument_dtype=dtypes.int64,
+ )
+ self._exclude_cols = convert.optional_param_to_tensor(
+ "exclude_cols",
+ exclude_cols,
+ argument_default=[],
+ argument_dtype=dtypes.int64,
+ )
+ self._element_spec = tuple(
+ tensor_spec.TensorSpec([], d.dtype) for d in self._record_defaults)
+ variant_tensor = gen_experimental_dataset_ops.csv_dataset_v2(
+ filenames=self._filenames,
+ record_defaults=self._record_defaults,
+ buffer_size=self._buffer_size,
+ header=self._header,
+ output_shapes=self._flat_shapes,
+ field_delim=self._field_delim,
+ use_quote_delim=self._use_quote_delim,
+ na_value=self._na_value,
+ select_cols=self._select_cols,
+ exclude_cols=self._exclude_cols,
+ compression_type=self._compression_type)
+ super(CsvDatasetV2, self).__init__(variant_tensor)
+
+ @property
+ def element_spec(self):
+ return self._element_spec
+
+
+@tf_export(v1=["data.experimental.CsvDataset"])
+class CsvDatasetV1(dataset_ops.DatasetV1Adapter):
+ """A Dataset comprising lines from one or more CSV files."""
+
+ @functools.wraps(CsvDatasetV2.__init__, ("__module__", "__name__"))
+ def __init__(self,
+ filenames,
+ record_defaults,
+ compression_type=None,
+ buffer_size=None,
+ header=False,
+ field_delim=",",
+ use_quote_delim=True,
+ na_value="",
+ select_cols=None):
+ """Creates a `CsvDataset` by reading and decoding CSV files.
+
+ The elements of this dataset correspond to records from the file(s).
+ RFC 4180 format is expected for CSV files
+ (https://tools.ietf.org/html/rfc4180).
+ Note that we allow leading and trailing spaces for int or float fields.
+
+ For example, suppose we have a file 'my_file0.csv' with four CSV columns of
+ different data types:
+ ```
+ abcdefg,4.28E10,5.55E6,12
+ hijklmn,-5.3E14,,2
+ ```
+
+ We can construct a CsvDataset from it as follows:
+
+ ```python
+ dataset = tf.data.experimental.CsvDataset(
+ "my_file*.csv",
+ [tf.float32, # Required field, use dtype or empty tensor
+ tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
+ tf.int32, # Required field, use dtype or empty tensor
+ ],
+ select_cols=[1,2,3] # Only parse last three columns
+ )
+ ```
+
+ The expected output of its iterations is:
+
+ ```python
+ for element in dataset:
+ print(element)
+
+ >> (4.28e10, 5.55e6, 12)
+ >> (-5.3e14, 0.0, 2)
+ ```
+
+ Args:
+ filenames: A `tf.string` tensor containing one or more filenames.
+ record_defaults: A list of default values for the CSV fields. Each item in
+ the list is either a valid CSV `DType` (float32, float64, int32, int64,
+ string), or a `Tensor` object with one of the above types. Provide one
+ per column of CSV data: either a scalar `Tensor` default value if the
+ column is optional, or a `DType` or empty `Tensor` if it is required. If
+ both this and `select_cols` are specified, these must have the same
+ length, and `record_defaults` is assumed to be sorted in order of
+ increasing column index. If both this and `exclude_cols` are specified,
+ the sum of the lengths of `record_defaults` and `exclude_cols` should
+ equal the total number of columns in the CSV file.
+ compression_type: (Optional.) A `tf.string` scalar evaluating to one of
+ `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
+ compression.
+ buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
+ to buffer while reading files. Defaults to 4MB.
+ header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
+ have header line(s) that should be skipped when parsing. Defaults to
+ `False`.
+ field_delim: (Optional.) A `tf.string` scalar containing the delimiter
+ character that separates fields in a record. Defaults to `","`.
+ use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats double
+ quotation marks as regular characters inside of string fields (ignoring
+ RFC 4180, Section 2, Bullet 5). Defaults to `True`.
+ na_value: (Optional.) A `tf.string` scalar indicating a value that will be
+ treated as NA/NaN.
+ select_cols: (Optional.) A sorted list of column indices to select from
+ the input data. If specified, only this subset of columns will be
+ parsed. Defaults to parsing all columns. At most one of `select_cols`
+ and `exclude_cols` can be specified.
+ """
+ wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,
+ buffer_size, header, field_delim, use_quote_delim,
+ na_value, select_cols)
+ super(CsvDatasetV1, self).__init__(wrapped)
+
+
+@tf_export("data.experimental.make_batched_features_dataset", v1=[])
+def make_batched_features_dataset_v2(file_pattern,
+ batch_size,
+ features,
+ reader=None,
+ label_key=None,
+ reader_args=None,
+ num_epochs=None,
+ shuffle=True,
+ shuffle_buffer_size=10000,
+ shuffle_seed=None,
+ prefetch_buffer_size=None,
+ reader_num_threads=None,
+ parser_num_threads=None,
+ sloppy_ordering=False,
+ drop_final_batch=False):
+ """Returns a `Dataset` of feature dictionaries from `Example` protos.
+
+ If the `label_key` argument is provided, returns a `Dataset` of tuples
+ comprising feature dictionaries and labels.
+
+ Example:
+
+ ```
+ serialized_examples = [
+ features {
+ feature { key: "age" value { int64_list { value: [ 0 ] } } }
+ feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
+ feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
+ },
+ features {
+ feature { key: "age" value { int64_list { value: [] } } }
+ feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
+ feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
+ }
+ ]
+ ```
+
+ We can use arguments:
+
+ ```
+ features: {
+ "age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
+ "gender": FixedLenFeature([], dtype=tf.string),
+ "kws": VarLenFeature(dtype=tf.string),
+ }
+ ```
+
+ And the expected output is:
+
+ ```python
+ {
+ "age": [[0], [-1]],
+ "gender": [["f"], ["f"]],
+ "kws": SparseTensor(
+ indices=[[0, 0], [0, 1], [1, 0]],
+ values=["code", "art", "sports"],
+ dense_shape=[2, 2]),
+ }
+ ```
+
+ Args:
+ file_pattern: List of files or patterns of file paths containing
+ `Example` records. See `tf.io.gfile.glob` for pattern rules.
+ batch_size: An int representing the number of records to combine
+ in a single batch.
+ features: A `dict` mapping feature keys to `FixedLenFeature` or
+ `VarLenFeature` values. See `tf.io.parse_example`.
+ reader: A function or class that can be
+ called with a `filenames` tensor and (optional) `reader_args` and returns
+ a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
+ label_key: (Optional) A string corresponding to the key under which labels
+ are stored in the `tf.Example` protos. If provided, it must be one of
+ the `features` keys; otherwise a `ValueError` is raised.
+ reader_args: Additional arguments to pass to the reader class.
+ num_epochs: Integer specifying the number of times to read through the
+ dataset. If None, cycles through the dataset forever. Defaults to `None`.
+ shuffle: A boolean indicating whether the input should be shuffled. Defaults
+ to `True`.
+ shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
+ ensures better shuffling but would increase memory usage and startup time.
+ shuffle_seed: Randomization seed to use for shuffling.
+ prefetch_buffer_size: Number of feature batches to prefetch in order to
+ improve performance. Recommended value is the number of batches consumed
+ per training step. Defaults to auto-tune.
+ reader_num_threads: Number of threads used to read `Example` records. If >1,
+ the results will be interleaved. Defaults to `1`.
+ parser_num_threads: Number of threads to use for parsing `Example` tensors
+ into a dictionary of `Feature` tensors. Defaults to `2`.
+ sloppy_ordering: If `True`, reading performance will be improved at
+ the cost of non-deterministic ordering. If `False`, the order of elements
+ produced is deterministic prior to shuffling (elements are still
+ randomized if `shuffle=True`; note that if the seed is set, the order
+ of elements after shuffling is deterministic). Defaults to `False`.
+ drop_final_batch: If `True`, and the batch size does not evenly divide the
+ input dataset size, the final smaller batch will be dropped. Defaults to
+ `False`.
+
+ Returns:
+ A dataset of `dict` elements (or a tuple of `dict` elements and label).
+ Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
+
+ Raises:
+ TypeError: If `reader` is of the wrong type.
+ ValueError: If `label_key` is not one of the `features` keys.
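+
+ For illustration, a minimal call might look as follows (the file pattern
+ and feature spec here are hypothetical, not defaults of this API):
+
+ ```python
+ dataset = tf.data.experimental.make_batched_features_dataset(
+ file_pattern="/tmp/data-*.tfrecord",  # hypothetical input files
+ batch_size=32,
+ features={
+ "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
+ "gender": tf.io.FixedLenFeature([], dtype=tf.string),
+ "kws": tf.io.VarLenFeature(dtype=tf.string),
+ },
+ label_key="age")
+ for features, label in dataset.take(1):
+ pass  # `features` maps the remaining keys to batched tensors;
+       # `label` is the popped "age" column.
+ ```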
+ """ + if reader is None: + reader = core_readers.TFRecordDataset + + if reader_num_threads is None: + reader_num_threads = 1 + if parser_num_threads is None: + parser_num_threads = 2 + if prefetch_buffer_size is None: + prefetch_buffer_size = dataset_ops.AUTOTUNE + + # Create dataset of all matching filenames + dataset = dataset_ops.Dataset.list_files( + file_pattern, shuffle=shuffle, seed=shuffle_seed) + + if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase): + raise TypeError("The `reader` argument must return a `Dataset` object. " + "`tf.ReaderBase` subclasses are not supported. For " + "example, pass `tf.data.TFRecordDataset` instead of " + "`tf.TFRecordReader`.") + + # Read `Example` records from files as tensor objects. + if reader_args is None: + reader_args = [] + + if reader_num_threads == dataset_ops.AUTOTUNE: + dataset = dataset.interleave( + lambda filename: reader(filename, *reader_args), + num_parallel_calls=reader_num_threads) + options = options_lib.Options() + options.deterministic = not sloppy_ordering + dataset = dataset.with_options(options) + else: + # Read files sequentially (if reader_num_threads=1) or in parallel + def apply_fn(dataset): + return core_readers.ParallelInterleaveDataset( + dataset, + lambda filename: reader(filename, *reader_args), + cycle_length=reader_num_threads, + block_length=1, + sloppy=sloppy_ordering, + buffer_output_elements=None, + prefetch_input_elements=None) + + dataset = dataset.apply(apply_fn) + + # Extract values if the `Example` tensors are stored as key-value tuples. + if dataset_ops.get_legacy_output_types(dataset) == ( + dtypes.string, dtypes.string): + dataset = map_op._MapDataset( # pylint: disable=protected-access + dataset, lambda _, v: v, use_inter_op_parallelism=False) + + # Apply dataset repeat and shuffle transformations. + dataset = _maybe_shuffle_and_repeat( + dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed) + + # NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to + # improve the shape inference, because it makes the batch dimension static. + # It is safe to do this because in that case we are repeating the input + # indefinitely, and all batches will be full-sized. + dataset = dataset.batch( + batch_size, drop_remainder=drop_final_batch or num_epochs is None) + + # Parse `Example` tensors to a dictionary of `Feature` tensors. 
+ dataset = dataset.apply( + parsing_ops.parse_example_dataset( + features, num_parallel_calls=parser_num_threads)) + + if label_key: + if label_key not in features: + raise ValueError( + f"The `label_key` provided ({label_key}) must be one of the " + f"`features` keys: {features.keys()}.") + dataset = dataset.map(lambda x: (x, x.pop(label_key))) + + dataset = dataset.prefetch(prefetch_buffer_size) + return dataset + + +@tf_export(v1=["data.experimental.make_batched_features_dataset"]) +def make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring + batch_size, + features, + reader=None, + label_key=None, + reader_args=None, + num_epochs=None, + shuffle=True, + shuffle_buffer_size=10000, + shuffle_seed=None, + prefetch_buffer_size=None, + reader_num_threads=None, + parser_num_threads=None, + sloppy_ordering=False, + drop_final_batch=False): + return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2( + file_pattern, batch_size, features, reader, label_key, reader_args, + num_epochs, shuffle, shuffle_buffer_size, shuffle_seed, + prefetch_buffer_size, reader_num_threads, parser_num_threads, + sloppy_ordering, drop_final_batch)) +make_batched_features_dataset_v1.__doc__ = ( + make_batched_features_dataset_v2.__doc__) + + +def _get_file_names(file_pattern, shuffle): + """Parse list of file names from pattern, optionally shuffled. + + Args: + file_pattern: File glob pattern, or list of glob patterns. + shuffle: Whether to shuffle the order of file names. + + Returns: + List of file names matching `file_pattern`. + + Raises: + ValueError: If `file_pattern` is empty, or pattern matches no files. + """ + if isinstance(file_pattern, list): + if not file_pattern: + raise ValueError("Argument `file_pattern` should not be empty.") + file_names = [] + for entry in file_pattern: + file_names.extend(gfile.Glob(entry)) + else: + file_names = list(gfile.Glob(file_pattern)) + + if not file_names: + raise ValueError(f"No files match `file_pattern` {file_pattern}.") + + # Sort files so it will be deterministic for unit tests. + if not shuffle: + file_names = sorted(file_names) + return file_names + + +@tf_export("data.experimental.SqlDataset", v1=[]) +class SqlDatasetV2(dataset_ops.DatasetSource): + """A `Dataset` consisting of the results from a SQL query. + + `SqlDataset` allows a user to read data from the result set of a SQL query. + For example: + + ```python + dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3", + "SELECT name, age FROM people", + (tf.string, tf.int32)) + # Prints the rows of the result set of the above query. + for element in dataset: + print(element) + ``` + """ + + def __init__(self, driver_name, data_source_name, query, output_types): + """Creates a `SqlDataset`. + + Args: + driver_name: A 0-D `tf.string` tensor containing the database type. + Currently, the only supported value is 'sqlite'. + data_source_name: A 0-D `tf.string` tensor containing a connection string + to connect to the database. + query: A 0-D `tf.string` tensor containing the SQL query to execute. + output_types: A tuple of `tf.DType` objects representing the types of the + columns returned by `query`. 
+ """ + self._driver_name = ops.convert_to_tensor( + driver_name, dtype=dtypes.string, name="driver_name") + self._data_source_name = ops.convert_to_tensor( + data_source_name, dtype=dtypes.string, name="data_source_name") + self._query = ops.convert_to_tensor( + query, dtype=dtypes.string, name="query") + self._element_spec = nest.map_structure( + lambda dtype: tensor_spec.TensorSpec([], dtype), output_types) + variant_tensor = gen_experimental_dataset_ops.sql_dataset( + self._driver_name, self._data_source_name, self._query, + **self._flat_structure) + super(SqlDatasetV2, self).__init__(variant_tensor) + + @property + def element_spec(self): + return self._element_spec + + +@tf_export(v1=["data.experimental.SqlDataset"]) +class SqlDatasetV1(dataset_ops.DatasetV1Adapter): + """A `Dataset` consisting of the results from a SQL query.""" + + @functools.wraps(SqlDatasetV2.__init__) + def __init__(self, driver_name, data_source_name, query, output_types): + wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types) + super(SqlDatasetV1, self).__init__(wrapped) + + +if tf2.enabled(): + CsvDataset = CsvDatasetV2 + SqlDataset = SqlDatasetV2 + make_batched_features_dataset = make_batched_features_dataset_v2 + make_csv_dataset = make_csv_dataset_v2 +else: + CsvDataset = CsvDatasetV1 + SqlDataset = SqlDatasetV1 + make_batched_features_dataset = make_batched_features_dataset_v1 + make_csv_dataset = make_csv_dataset_v1 + + +def _tf2_callback(): + global CsvDataset, SqlDataset, make_batched_features_dataset, make_csv_dataset + if tf2.enabled(): + CsvDataset = CsvDatasetV2 + SqlDataset = SqlDatasetV2 + make_batched_features_dataset = make_batched_features_dataset_v2 + make_csv_dataset = make_csv_dataset_v2 + else: + CsvDataset = CsvDatasetV1 + SqlDataset = SqlDatasetV1 + make_batched_features_dataset = make_batched_features_dataset_v1 + make_csv_dataset = make_csv_dataset_v1 + + +v2_compat.register_data_v2_callback(_tf2_callback) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/resampling.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/resampling.py new file mode 100644 index 0000000000000000000000000000000000000000..e233adc9b22c4398d7a8beae60c358df0f7d7c2d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/resampling.py @@ -0,0 +1,50 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resampling dataset transformations.""" +from tensorflow.python.util import deprecation +from tensorflow.python.util.tf_export import tf_export + + +@deprecation.deprecated(None, "Use `tf.data.Dataset.rejection_resample(...)`.") +@tf_export("data.experimental.rejection_resample") +def rejection_resample(class_func, target_dist, initial_dist=None, seed=None): + """A transformation that resamples a dataset to achieve a target distribution. 
+ + **NOTE** Resampling is performed via rejection sampling; some fraction + of the input values will be dropped. + + Args: + class_func: A function mapping an element of the input dataset to a scalar + `tf.int32` tensor. Values should be in `[0, num_classes)`. + target_dist: A floating point type tensor, shaped `[num_classes]`. + initial_dist: (Optional.) A floating point type tensor, shaped + `[num_classes]`. If not provided, the true class distribution is + estimated live in a streaming fashion. + seed: (Optional.) Python integer seed for the resampler. + + Returns: + A `Dataset` transformation function, which can be passed to + `tf.data.Dataset.apply`. + """ + def _apply_fn(dataset): + """Function from `Dataset` to `Dataset` that applies the transformation.""" + + return dataset.rejection_resample( + class_func=class_func, + target_dist=target_dist, + initial_dist=initial_dist, + seed=seed) + + return _apply_fn diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/shuffle_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/shuffle_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..ba4d36578020df20d75eb5da9ffcdf629aba9f0f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/shuffle_ops.py @@ -0,0 +1,272 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Experimental shuffle ops."""
+
+import functools
+import numpy as np
+
+from tensorflow.python.data.experimental.ops import random_access
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.data.util import random_seed
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_dataset_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import stateless_random_ops
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+class _ShuffleAndRepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):
+ """A `Dataset` that fuses `shuffle` and `repeat`."""
+
+ def __init__(self, input_dataset, buffer_size, count=None, seed=None):
+ self._input_dataset = input_dataset
+ self._buffer_size = ops.convert_to_tensor(
+ buffer_size, dtype=dtypes.int64, name="buffer_size")
+ if count is None:
+ self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
+ else:
+ self._count = ops.convert_to_tensor(
+ count, dtype=dtypes.int64, name="count")
+ self._seed, self._seed2 = random_seed.get_seed(seed)
+ variant_tensor = gen_dataset_ops.shuffle_and_repeat_dataset(
+ self._input_dataset._variant_tensor, # pylint: disable=protected-access
+ buffer_size=self._buffer_size,
+ count=self._count,
+ seed=self._seed,
+ seed2=self._seed2,
+ **self._flat_structure)
+ super(_ShuffleAndRepeatDataset, self).__init__(input_dataset,
+ variant_tensor)
+
+
+@deprecation.deprecated(
+ None, "Use `tf.data.Dataset.shuffle(buffer_size, seed)` followed by "
+ "`tf.data.Dataset.repeat(count)`. Static tf.data optimizations will take "
+ "care of using the fused implementation.")
+@tf_export("data.experimental.shuffle_and_repeat")
+def shuffle_and_repeat(buffer_size, count=None, seed=None):
+ """Shuffles and repeats a Dataset, reshuffling with each repetition.
+
+ >>> d = tf.data.Dataset.from_tensor_slices([1, 2, 3])
+ >>> d = d.apply(tf.data.experimental.shuffle_and_repeat(2, count=2))
+ >>> [elem.numpy() for elem in d] # doctest: +SKIP
+ [2, 3, 1, 1, 3, 2]
+
+ ```python
+ dataset.apply(
+ tf.data.experimental.shuffle_and_repeat(buffer_size, count, seed))
+ ```
+
+ produces the same output as
+
+ ```python
+ dataset.shuffle(
+ buffer_size, seed=seed, reshuffle_each_iteration=True).repeat(count)
+ ```
+
+ In each repetition, this dataset fills a buffer with `buffer_size` elements,
+ then randomly samples elements from this buffer, replacing the selected
+ elements with new elements. For perfect shuffling, set the buffer size equal
+ to the full size of the dataset.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is
+ set to 1,000, then `shuffle` will initially select a random element from
+ only the first 1,000 elements in the buffer. Once an element is selected,
+ its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1,000-element buffer.
+
+ Args:
+ buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
+ number of elements that will be buffered when prefetching.
+ count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number
+ of times the dataset should be repeated. The default behavior (if `count`
+ is `None` or `-1`) is for the dataset to be repeated indefinitely.
+ seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
+ seed that will be used to create the distribution. See
+ `tf.random.set_seed` for behavior.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
+ """
+
+ def _apply_fn(dataset): # pylint: disable=missing-docstring
+ return _ShuffleAndRepeatDataset(dataset, buffer_size, count, seed)
+
+ return _apply_fn
+
+
+def _process_file_infos(file_infos):
+ """Computes aggregate information about files to read.
+
+ The method collects information about the files to read, the total number of
+ elements, and arrays that can be used to account for elements to be skipped,
+ which can be specified via the "skip" and "take" keys.
+
+ To account for elements to skip, the range of each file can be divided into
+ three regions:
+ - S (elements to skip)
+ - T (elements to read)
+ - R (remainder of elements that will also be skipped)
+
+ The `thresholds` and `offsets` arrays are initialized as follows:
+ `thresholds = [0, T_1, T_1 + T_2, ...]` and
+ `offsets = [S_1, S_1 + R_1 + S_2, S_1 + R_1 + S_2 + R_2 + S_3, ...]`
+
+ This makes it possible to map an index from a contiguous range
+ `(0...num_elements_to_read)` to an index in the range of all elements,
+ skipping over elements as per the "skip" and "take" key values. In
+ particular, for a given input index `X`, we find the greatest `thresholds`
+ value that is smaller than or equal to `X`. Let `t(X)` denote the index of
+ that value in the `thresholds` array. The output index is computed as
+ `X + offsets[t(X)]`.
+
+ Args:
+ file_infos: See `file_infos` argument of `index_shuffle` for details.
+
+ Returns:
+ A dictionary containing the following keys:
+ - `files`, the vector of pathnames of files to read
+ - `num_elements`, an integer identifying the total number of elements
+ - `offsets`, the vector of offsets to use for index adjustment (in case
+ any elements should be skipped)
+ - `thresholds`, the vector of thresholds to use for index adjustment (in
+ case any elements should be skipped)
+ """
+ files = []
+ num_elements = 0
+ offsets = np.int64([])
+ offset_sum = 0
+ thresholds = np.int64([])
+ threshold_sum = 0
+ adjustment_needed = False
+ for file_info in file_infos:
+ files.append(file_info["path"])
+ skip = 0
+ if "skip" in file_info:
+ if file_info["skip"] < -1:
+ raise ValueError("`skip` should be greater than `-1` but got {}".format(
+ file_info["skip"]))
+ if file_info["skip"] == -1:
+ skip = file_info["num_elements"]
+ else:
+ skip = min(file_info["skip"], file_info["num_elements"])
+ take = file_info["num_elements"] - skip
+ if "take" in file_info:
+ if file_info["take"] < -1:
+ raise ValueError("`take` should be greater than `-1` but got {}".format(
+ file_info["take"]))
+ # `file_info["take"] == -1` is a no-op
+ if file_info["take"] != -1:
+ take = min(file_info["take"], take)
+ remainder = file_info["num_elements"] - skip - take
+ if take != file_info["num_elements"]:
+ adjustment_needed = True
+ num_elements += take
+ offsets = np.append(offsets, offset_sum + skip)
+ offset_sum += skip + remainder
+ thresholds = np.append(thresholds, threshold_sum)
+ threshold_sum += take
+ result = {"files": files, "num_elements": num_elements}
+ if adjustment_needed:
+ result["offsets"] = offsets
+ result["thresholds"] = thresholds
+ return result
+
+
+def _adjust_index(index, thresholds, offsets):
+ """Adjusts index to account for elements to be skipped."""
+ t_index = array_ops.shape(
+ array_ops.boolean_mask(
+ thresholds,
+
math_ops.less_equal(thresholds, index)))[0] - 1
+ return index + array_ops.gather(offsets, t_index)
+
+
+# TODO(jsimsa): Expose this method in the public API. When we do, consider
+# defining `FileInfo` as a public API to encapsulate the information provided
+# through the `file_infos` argument.
+def index_shuffle(file_infos,
+ reader_factory,
+ seed=None,
+ reshuffle_each_iteration=False,
+ num_parallel_calls=dataset_ops.AUTOTUNE):
+ """Creates a (globally) shuffled dataset from the given set of files.
+
+ Unlike `tf.data.Dataset.shuffle()`, which uses an in-memory buffer to shuffle
+ elements of the input dataset in a streaming fashion,
+ `tf.data.experimental.index_shuffle()` performs a global shuffle of element
+ indices and then reads the data in a shuffled order. The advantage of
+ `index_shuffle()` is that it can perform a global shuffle of datasets that do
+ not fit into memory (as long as the array of their indices does) and that the
+ shuffling logic it provides is compatible with symbolic checkpointing. The
+ disadvantage of `index_shuffle()` is that reading data in a shuffled random
+ order will in general not be as efficient as reading data sequentially.
+
+ Args:
+ file_infos: A list of dictionaries that describe each file of the input
+ dataset. Each dictionary is expected to contain the "path" key, which
+ identifies the path of the file, and the "num_elements" key, which
+ identifies the number of elements in the file. In addition, the "skip"
+ and "take" keys can be used to identify the number of elements to skip
+ and take, respectively. By default, no elements are skipped and all
+ elements are taken.
+ reader_factory: A function that maps a sequence of filenames to an instance
+ of `tf.data.Dataset` that reads data from the files.
+ seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
+ seed that will be used to shuffle the order of elements. Defaults to a
+ non-deterministic seed.
+ reshuffle_each_iteration: (Optional.) A `tf.bool` scalar `tf.Tensor` that
+ determines whether to change the shuffle order each iteration. Defaults to
+ `False`.
+ num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor` that
+ determines the maximum number of random access operations to perform
+ in parallel. By default, the tf.data runtime uses autotuning to determine
+ the value dynamically.
+
+ Returns:
+ A `tf.data.Dataset` object, representing a globally shuffled dataset of
+ the input data.
+ """
+
+ result = _process_file_infos(file_infos)
+
+ def sequential_index_shuffle(seeds):
+ dataset = dataset_ops.Dataset.range(result["num_elements"])
+
+ def read_element(dataset, index):
+ # 1) Shuffle the index.
+ shuffled_index = stateless_random_ops.index_shuffle(
+ index, seeds, result["num_elements"] - 1)
+ # 2) If needed, adjust the index to the non-contiguous range.
+ if "thresholds" in result and "offsets" in result:
+ shuffled_index = _adjust_index(shuffled_index, result["thresholds"],
+ result["offsets"])
+ # 3) Perform the read.
+ return random_access.at(dataset, shuffled_index)
+
+ # We evaluate `reader_factory()` eagerly to prevent the dataset from being
+ # created on every lookup.
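+ # Binding the dataset returned by `reader_factory()` once via
+ # `functools.partial` lets `random_access.at()` in `read_element` reuse it
+ # for every shuffled index.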
+ map_func = functools.partial(read_element, reader_factory(result["files"]))
+ return dataset.map(map_func, num_parallel_calls=num_parallel_calls)
+
+ rng_ds = dataset_ops.Dataset.random(
+ seed=seed,
+ rerandomize_each_iteration=reshuffle_each_iteration)
+ rng_ds = rng_ds.take(2).batch(2, drop_remainder=True)
+ return rng_ds.flat_map(sequential_index_shuffle)
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/take_while_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/take_while_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..08324782dbfaae3ced6d11e44d2378fa71f4610d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/take_while_ops.py @@ -0,0 +1,38 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""take-while dataset transformation."""
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
+
+
+@deprecation.deprecated(None, "Use `tf.data.Dataset.take_while(...)`")
+@tf_export("data.experimental.take_while")
+def take_while(predicate):
+ """A transformation that stops dataset iteration based on a `predicate`.
+
+ Args:
+ predicate: A function that maps a nested structure of tensors (having shapes
+ and types defined by `self.output_shapes` and `self.output_types`) to a
+ scalar `tf.bool` tensor.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
+ """
+
+ def _apply_fn(dataset):
+ return dataset.take_while(predicate=predicate)
+
+ return _apply_fn
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/testing.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..e164bbac1cb7286967aad39157694387fd7edbd2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/testing.py @@ -0,0 +1,198 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Experimental API for testing of tf.data."""
+from google.protobuf import text_format
+from tensorflow.core.framework import attr_value_pb2
+from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import gen_experimental_dataset_ops
+
+
+def assert_next(transformations):
+ """A transformation that asserts which transformations happen next.
+
+ Transformations should be referred to by their base name, not including
+ version suffix. For example, use "Batch" instead of "BatchV2". "Batch" will
+ match any of "Batch", "BatchV1", "BatchV2", etc.
+
+ Args:
+ transformations: A `tf.string` vector `tf.Tensor` identifying the
+ transformations that are expected to happen next.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
+ """
+
+ def _apply_fn(dataset):
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
+ return _AssertNextDataset(dataset, transformations)
+
+ return _apply_fn
+
+
+def assert_prev(transformations):
+ r"""Asserts which transformations, with which attributes, happened previously.
+
+ Each transformation is represented as a tuple in the input.
+
+ The first element is the base op name of the transformation, not including
+ version suffix. For example, use "BatchDataset" instead of
+ "BatchDatasetV2". "BatchDataset" will match any of "BatchDataset",
+ "BatchDatasetV1", "BatchDatasetV2", etc.
+
+ The second element is a dict of attribute name-value pairs. Attribute
+ values must be of type bool, int, or string.
+
+ Example usage:
+
+ >>> dataset = dataset_ops.Dataset.from_tensors(0) \
+ ... .map(lambda x: x) \
+ ... .batch(1, deterministic=True, num_parallel_calls=8)
+ >>> dataset = dataset.apply(assert_prev(
+ ... [("ParallelBatchDataset", {"deterministic": True}),
+ ... ("MapDataset", {})]))
+
+ Args:
+ transformations: A list of tuples identifying the (required) transformation
+ name, with (optional) attribute name-value pairs, that are expected to
+ have happened previously.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
+ """
+
+ def _apply_fn(dataset):
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
+ return _AssertPrevDataset(dataset, transformations)
+
+ return _apply_fn
+
+
+def non_serializable():
+ """A non-serializable identity transformation.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
+ """
+
+ def _apply_fn(dataset):
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
+ return _NonSerializableDataset(dataset)
+
+ return _apply_fn
+
+
+def sleep(sleep_microseconds):
+ """Sleeps for `sleep_microseconds` before producing each input element.
+
+ Args:
+ sleep_microseconds: The number of microseconds to sleep before producing an
+ input element.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ `tf.data.Dataset.apply`.
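+
+ For example, a test might throttle a pipeline like this (the one-millisecond
+ delay is arbitrary; this helper is internal, so it is referenced directly
+ rather than through a public `tf.data` symbol):
+
+ ```python
+ dataset = dataset_ops.Dataset.range(10)
+ dataset = dataset.apply(sleep(1000))  # pause ~1ms before each element
+ ```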
+ """ + + def _apply_fn(dataset): + return _SleepDataset(dataset, sleep_microseconds) + + return _apply_fn + + +class _AssertNextDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that asserts which transformations happen next.""" + + def __init__(self, input_dataset, transformations): + """See `assert_next()` for details.""" + self._input_dataset = input_dataset + if transformations is None: + raise ValueError( + "Invalid `transformations`. `transformations` should not be empty.") + + self._transformations = ops.convert_to_tensor( + transformations, dtype=dtypes.string, name="transformations") + variant_tensor = ( + gen_experimental_dataset_ops.experimental_assert_next_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._transformations, + **self._flat_structure)) + super(_AssertNextDataset, self).__init__(input_dataset, variant_tensor) + + +class _AssertPrevDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that asserts which transformations happened previously.""" + + def __init__(self, input_dataset, transformations): + """See `assert_prev()` for details.""" + self._input_dataset = input_dataset + if transformations is None: + raise ValueError("`transformations` cannot be empty") + + def serialize_transformation(op_name, attributes): + proto = attr_value_pb2.NameAttrList(name=op_name) + if attributes is None or isinstance(attributes, set): + attributes = dict() + for (name, value) in attributes.items(): + if isinstance(value, bool): + proto.attr[name].b = value + elif isinstance(value, int): + proto.attr[name].i = value + elif isinstance(value, str): + proto.attr[name].s = value.encode() + else: + raise ValueError( + f"attribute value type ({type(value)}) must be bool, int, or str") + return text_format.MessageToString(proto) + + self._transformations = ops.convert_to_tensor( + [serialize_transformation(*x) for x in transformations], + dtype=dtypes.string, + name="transformations") + variant_tensor = ( + gen_experimental_dataset_ops.assert_prev_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._transformations, + **self._flat_structure)) + super(_AssertPrevDataset, self).__init__(input_dataset, variant_tensor) + + +class _NonSerializableDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that performs non-serializable identity transformation.""" + + def __init__(self, input_dataset): + """See `non_serializable()` for details.""" + self._input_dataset = input_dataset + variant_tensor = ( + gen_experimental_dataset_ops.experimental_non_serializable_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + **self._flat_structure)) + super(_NonSerializableDataset, self).__init__(input_dataset, variant_tensor) + + +class _SleepDataset(dataset_ops.UnaryUnchangedStructureDataset): + """A `Dataset` that sleeps before producing each upstream element.""" + + def __init__(self, input_dataset, sleep_microseconds): + self._input_dataset = input_dataset + self._sleep_microseconds = sleep_microseconds + variant_tensor = gen_experimental_dataset_ops.sleep_dataset( + self._input_dataset._variant_tensor, # pylint: disable=protected-access + self._sleep_microseconds, + **self._flat_structure) + super(_SleepDataset, self).__init__(input_dataset, variant_tensor) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__init__.py new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d21fa96c81d1d2dc84d3fc581992dcf87fa29ed Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/cluster.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/cluster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecff1250f1c3d8d9940c931001e001dc92f1e271 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/cluster.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/item.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/item.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47dfe175c1e430bb720b26bfae08fec1dff298af Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/item.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/tf_optimizer.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/tf_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce528c878c7e897faeec2ff9b59651391094bbdd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/__pycache__/tf_optimizer.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_cluster.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_cluster.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fa2a1086cac2526c0751f090625210d87d1c1b6e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_cluster.pyi @@ -0,0 +1,27 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class Cluster: + def __init__(self, *args, **kwargs) -> None: ... + +def TF_DeterminePeakMemoryUsage(arg0, arg1: Cluster) -> dict[str,tuple[int,list[tuple[str,int,int,int,int]]]]: ... +def TF_EstimatePerformance(arg0: bytes) -> float: ... +def TF_GetSupportedDevices(arg0: Cluster, arg1) -> dict[str,list[str]]: ... +def TF_ListAvailableOps() -> list[str]: ... +def TF_ListDevices(arg0: Cluster) -> list[bytes]: ... 
+def TF_MeasureCosts(arg0, arg1: Cluster, arg2: bool) -> tuple[list[bytes],float,bytes]: ... +def TF_NewCluster(arg0: bool, arg1: bool) -> Cluster: ... +def TF_NewVirtualCluster(arg0: list[bytes]) -> Cluster: ... +def TF_ShutdownCluster(arg0: Cluster) -> None: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a087325eb642c0fbc9f2d6353d5c578a3856b2d0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_item.pyi @@ -0,0 +1,22 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +class GrapplerItem: + def __init__(self, *args, **kwargs) -> None: ... + +def TF_GetColocationGroups(arg0: GrapplerItem) -> list[list[str]]: ... +def TF_GetOpProperties(arg0: GrapplerItem) -> dict[str,list[bytes]]: ... +def TF_IdentifyImportantOps(arg0: GrapplerItem, arg1: bool) -> list[str]: ... +def TF_NewItem(arg0: bytes, arg1: bool, arg2: bool) -> GrapplerItem: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_optimizer.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_optimizer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9eb2d0e7393c6f4d5e84e7d24b9a85c4e243b387 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/_pywrap_tf_optimizer.pyi @@ -0,0 +1,19 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from typing import Any + +def TF_OptimizeGraph(*args, **kwargs) -> Any: ... +def TF_OptimizeGraphSerialized(arg0, arg1: str, arg2: str, arg3: bool, arg4: str, arg5: bool) -> bytes: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/cluster.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..9e3c2eaad9fc2dc001810e279a917b4c5afa21d5 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/cluster.py @@ -0,0 +1,118 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A python interface for Grappler clusters.""" + +import contextlib + +from tensorflow.core.framework import step_stats_pb2 +from tensorflow.core.grappler.costs import op_performance_data_pb2 +from tensorflow.core.protobuf import device_properties_pb2 +from tensorflow.python.grappler import _pywrap_tf_cluster as tf_cluster + + +class Cluster(object): + """Grappler Clusters.""" + + def __init__(self, + allow_soft_placement=True, + disable_detailed_stats=True, + disable_timeline=True, + devices=None): + """Creates a Cluster. + + Args: + allow_soft_placement: If True, TF will automatically fix illegal + placements instead of erroring out if the placement isn't legal. + disable_detailed_stats: If True, detailed statistics will not be + available. + disable_timeline: If True, the timeline information will not be reported. + devices: A list of devices of type device_properties_pb2.NamedDevice. + If None, a device list will be created based on the spec of + the local machine. + """ + self._tf_cluster = None + self._generate_timeline = not disable_timeline + + if devices is None: + self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, + disable_detailed_stats) + else: + devices_serialized = [device.SerializeToString() for device in devices] + self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized) + + def Shutdown(self): + if self._tf_cluster is not None: + tf_cluster.TF_ShutdownCluster(self._tf_cluster) + self._tf_cluster = None + + def __del__(self): + self.Shutdown() + + @property + def tf_cluster(self): + return self._tf_cluster + + def ListDevices(self): + """Returns a list of available hardware devices.""" + if self._tf_cluster is None: + return [] + return [device_properties_pb2.NamedDevice.FromString(device) + for device in tf_cluster.TF_ListDevices(self._tf_cluster)] + + def ListAvailableOps(self): + """Returns a list of all available operations (sorted alphabetically).""" + return tf_cluster.TF_ListAvailableOps() + + def GetSupportedDevices(self, item): + return tf_cluster.TF_GetSupportedDevices(self._tf_cluster, item.tf_item) + + def EstimatePerformance(self, device): + return tf_cluster.TF_EstimatePerformance(device.SerializeToString()) + + def MeasureCosts(self, item): + """Returns the cost of running the specified item. + + Args: + item: The item for which to measure the costs. + Returns: The triplet op_perfs, runtime, step_stats. + """ + op_perf_bytes_list, run_time, step_stats_bytes = tf_cluster.TF_MeasureCosts( + item.tf_item, self._tf_cluster, self._generate_timeline) + + op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes) + for op_perf_bytes in op_perf_bytes_list] + return (op_perfs, run_time, + step_stats_pb2.StepStats.FromString(step_stats_bytes)) + + def DeterminePeakMemoryUsage(self, item): + """Returns a snapshot of the peak memory usage. + + Args: + item: The item for which to measure the costs. 
+ Returns: A hashtable indexed by device name. + """ + return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, + self._tf_cluster) + + +@contextlib.contextmanager +def Provision(allow_soft_placement=True, + disable_detailed_stats=True, + disable_timeline=True, + devices=None): + cluster = Cluster(allow_soft_placement, disable_detailed_stats, + disable_timeline, devices) + yield cluster + cluster.Shutdown() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/item.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/item.py new file mode 100644 index 0000000000000000000000000000000000000000..d5f29606b8cd824c88ab9ebe2f167b96b96cf0fc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/item.py @@ -0,0 +1,90 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""A python interface for Grappler items.""" + +from tensorflow.core.grappler.costs import op_performance_data_pb2 +from tensorflow.core.protobuf import meta_graph_pb2 +from tensorflow.python.grappler import _pywrap_tf_item as tf_item + + +class Item(object): + """GrapplerItem.""" + + def __init__(self, + metagraph, + ignore_colocation=True, + ignore_user_placement=False): + """Creates an Item. + + Args: + metagraph: a TensorFlow metagraph. + ignore_colocation: if set, the tool will ignore all the colocation + constraints generated by TensorFlow. + ignore_user_placement: if set, all the placement annotations annotated in + the metagraph will be ignored. + Raises: + ValueError: the metagraph is incomplete or invalid. + """ + self._metagraph = metagraph + self._item_graph = meta_graph_pb2.MetaGraphDef() + self._item_graph.CopyFrom(metagraph) + self._ignore_colocation = ignore_colocation + self._ignore_user_placement = ignore_user_placement + self._tf_item = None + self._BuildTFItem() + + def IdentifyImportantOps(self, sort_topologically=False): + return tf_item.TF_IdentifyImportantOps(self.tf_item, sort_topologically) + + def GetOpProperties(self): + """Get Op properties.""" + props = tf_item.TF_GetOpProperties(self.tf_item) + properties = {} + for key, values in props.items(): + prop = [] + for value in values: + # TODO(petebu): Make this conversion to a dictionary be done in the C++ + # wrapper for performance. + prop.append( + op_performance_data_pb2.OpInfo.TensorProperties.FromString(value)) + properties[key] = prop + return properties + + def GetColocationGroups(self): + """Return a list of hard colocation constraints. + + All the nodes in a colocation tuple must be placed on the same device for + the model to work. + + Returns: + A list of colocation tuples. 
+ """ + return tf_item.TF_GetColocationGroups(self.tf_item) + + @property + def metagraph(self): + return self._metagraph + + @property + def tf_item(self): + if self._item_graph != self._metagraph: + self._BuildTFItem() + self._item_graph.CopyFrom(self._metagraph) + return self._tf_item + + def _BuildTFItem(self): + self._tf_item = tf_item.TF_NewItem(self._metagraph.SerializeToString(), + self._ignore_colocation, + self._ignore_user_placement) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/tf_optimizer.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/tf_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..0b07cb5abc3ee2b9dbaaad1010e143b0c9d6e66f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/grappler/tf_optimizer.py @@ -0,0 +1,91 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""Provides a proper python API for the symbols exported through swig.""" + +import threading + +from tensorflow.core.framework import graph_pb2 +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.grappler import _pywrap_tf_optimizer as tf_opt +from tensorflow.python.grappler import cluster as gcluster + +_OPTIMIZE_GRAPH_CLUSTER_LOCK = threading.Lock() +is_oss = True # Updated by copybara. + + +def OptimizeGraph(config_proto, + metagraph, + verbose=True, + graph_id=b'graph_to_optimize', + cluster=None, + strip_default_attributes=False): + """Optimize the provided metagraph. + + For best results, the signature_def field in `metagraph` should be populated + with information about input (feed) and output (fetch) tensors. + + Args: + config_proto: a ConfigProto protobuf. + metagraph: a MetagraphDef protobuf. + verbose: whether to log optimization results. + graph_id: a string identifying this graph. + cluster: a grappler cluster object representing hardware resources + available to run this graph. + strip_default_attributes: whether graph node attributes having default + values should be removed after all the optimization passes. This + option is useful if the resulting graph will be executed by an older + process that might not know some of the recently added attributes. + """ + if not isinstance(config_proto, config_pb2.ConfigProto): + raise TypeError('Argument `config_proto` should be a tf.ConfigProto, ' + f'received type: {type(config_proto).__name__}') + if is_oss: + optimize_method = tf_opt.TF_OptimizeGraphSerialized + metagraph = metagraph.SerializeToString() + else: + optimize_method = tf_opt.TF_OptimizeGraph + + if cluster is not None: + out_graph = optimize_method( + cluster.tf_cluster, + config_proto.SerializeToString(), + metagraph, + verbose, + graph_id, + strip_default_attributes, + ) + else: + # Currently Grappler assumes no more than 1 sessions alive globally. 
+ # See comments on SingleMachine::Provision(), hence we use the following + # lock to prevent concurrent access to the following code. + with _OPTIMIZE_GRAPH_CLUSTER_LOCK: + cluster = gcluster.Cluster() + try: + out_graph = optimize_method( + cluster.tf_cluster, + config_proto.SerializeToString(), + metagraph, + verbose, + graph_id, + strip_default_attributes, + ) + finally: + # Force the cleanup instead of waiting on python GC to cleanup the + # temporary cluster we've created. Otherwise subsequent calls might + # not have a clean slate because GC may not have run yet. + cluster.Shutdown() + if is_oss: + out_graph = graph_pb2.GraphDef.FromString(out_graph) + return out_graph diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7edd11f69dddeccb874e1a6957fc44a8b3f64fe7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bbadc047b17a5efcbaef204352a54d18531597a571a4cb23621160f34ebaad5 +size 368792 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..377b2237dd5a91a1cd676e023d4d28818440bf7b Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaa177d374977d47d1168cbab5e31547c139c322 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/builder.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader_impl.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98a515966719c34e2c1df159523a418da32c7902 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/loader_impl.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/main_op.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/main_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf040e572857bb93948051308da8813a278ce43d Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/main_op.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_serialization.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_serialization.cpython-310.pyc new file mode 100644 index
0000000000000000000000000000000000000000..fdc3088cc5883c72a239e2ecf3ac13baf7307f11 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/signature_serialization.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/tracing_utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/tracing_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3300f4488493836a3c8b1afb6713634934c416f Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/tracing_utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fc1af607539581ef7292fa92a287055e124e656 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/__pycache__/utils.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..976e148381d796bceff2b7bf041f076b5e62e45d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__init__.py @@ -0,0 +1,49 @@ +# Copyright 2022 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for registering the saving/loading steps for advanced objects.""" + +from tensorflow.python.saved_model.registration.registration import get_registered_class +from tensorflow.python.saved_model.registration.registration import get_registered_class_name +from tensorflow.python.saved_model.registration.registration import get_registered_saver_name +from tensorflow.python.saved_model.registration.registration import get_restore_function +from tensorflow.python.saved_model.registration.registration import get_save_function +from tensorflow.python.saved_model.registration.registration import get_strict_predicate_restore + +# These are currently an evolving feature. Use with care. 
+from tensorflow.python.saved_model.registration.registration import register_checkpoint_saver +from tensorflow.python.saved_model.registration.registration import register_serializable + +from tensorflow.python.saved_model.registration.registration import RegisteredSaver +from tensorflow.python.saved_model.registration.registration import validate_restore_function + + +def register_tf_serializable(name=None, predicate=None): + """See the docstring for `register_serializable`.""" + return register_serializable(package="tf", name=name, predicate=predicate) + + +def register_tf_checkpoint_saver(name=None, + predicate=None, + save_fn=None, + restore_fn=None, + strict_predicate_restore=True): + """See the docstring for `register_checkpoint_saver`.""" + return register_checkpoint_saver( + package="tf", + name=name, + predicate=predicate, + save_fn=save_fn, + restore_fn=restore_fn, + strict_predicate_restore=strict_predicate_restore) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24a5561c798913a9b504f70067a673e7cdc14054 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/registration.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/registration.py new file mode 100644 index 0000000000000000000000000000000000000000..11d8a9e0120fab4286a4669bbeab186c47a51e2e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/saved_model/registration/registration.py @@ -0,0 +1,391 @@ +# Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Serialization Registration for SavedModel. + +revived_types registration will be migrated to this infrastructure. + +See the Advanced saving section in go/savedmodel-configurability. +This API is approved for TF internal use only. +""" +import collections +import re + +from absl import logging + +from tensorflow.python.util import tf_inspect + + +# Only allow valid file/directory characters +_VALID_REGISTERED_NAME = re.compile(r"^[a-zA-Z0-9._-]+$") + + +class _PredicateRegistry(object): + """Registry with predicate-based lookup. + + See the documentation for `register_checkpoint_saver` and + `register_serializable` for reasons why predicates are required over a + class-based registry. + + Since this class is used for global registries, each object must be registered + to unique names (an error is raised if there are naming conflicts). 
The lookup + searches the predicates in reverse order, so that later-registered predicates + are executed first. + """ + __slots__ = ("_registry_name", "_registered_map", "_registered_predicates", + "_registered_names") + + def __init__(self, name): + self._registry_name = name + # Maps registered name -> object + self._registered_map = {} + # Maps registered name -> predicate + self._registered_predicates = {} + # Stores names in the order of registration + self._registered_names = [] + + @property + def name(self): + return self._registry_name + + def register(self, package, name, predicate, candidate): + """Registers a candidate object under the package, name and predicate.""" + if not isinstance(package, str) or not isinstance(name, str): + raise TypeError( + f"The package and name registered to a {self.name} must be strings, " + f"got: package={type(package)}, name={type(name)}") + if not callable(predicate): + raise TypeError( + f"The predicate registered to a {self.name} must be callable, " + f"got: {type(predicate)}") + registered_name = package + "." + name + if not _VALID_REGISTERED_NAME.match(registered_name): + raise ValueError( + f"Invalid registered {self.name}. Please check that the package and " + f"name follow the regex '{_VALID_REGISTERED_NAME.pattern}': " + f"(package='{package}', name='{name}')") + if registered_name in self._registered_map: + raise ValueError( + f"The name '{registered_name}' has already been registered to a " + f"{self.name}. Found: {self._registered_map[registered_name]}") + + self._registered_map[registered_name] = candidate + self._registered_predicates[registered_name] = predicate + self._registered_names.append(registered_name) + + def lookup(self, obj): + """Looks up the registered object using the predicate. + + Args: + obj: Object to pass to each of the registered predicates to look up the + registered object. + Returns: + The object registered with the first passing predicate. + Raises: + LookupError if the object does not match any of the predicate functions. + """ + return self._registered_map[self.get_registered_name(obj)] + + def name_lookup(self, registered_name): + """Looks up the registered object using the registered name.""" + try: + return self._registered_map[registered_name] + except KeyError: + raise LookupError(f"The {self.name} registry does not have name " + f"'{registered_name}' registered.") + + def get_registered_name(self, obj): + for registered_name in reversed(self._registered_names): + predicate = self._registered_predicates[registered_name] + if predicate(obj): + return registered_name + raise LookupError(f"Could not find matching {self.name} for {type(obj)}.") + + def get_predicate(self, registered_name): + try: + return self._registered_predicates[registered_name] + except KeyError: + raise LookupError(f"The {self.name} registry does not have name " + f"'{registered_name}' registered.") + + def get_registrations(self): + return self._registered_predicates + +_class_registry = _PredicateRegistry("serializable class") +_saver_registry = _PredicateRegistry("checkpoint saver") + + +def get_registered_class_name(obj): + try: + return _class_registry.get_registered_name(obj) + except LookupError: + return None + + +def get_registered_class(registered_name): + try: + return _class_registry.name_lookup(registered_name) + except LookupError: + return None + + +def register_serializable(package="Custom", name=None, predicate=None): # pylint: disable=unused-argument + """Decorator for registering a serializable class. 
+ + THIS METHOD IS STILL EXPERIMENTAL AND MAY CHANGE AT ANY TIME. + + Registered classes will be saved with a name generated by combining the + `package` and `name` arguments. When loading a SavedModel, modules saved with + this registered name will be created using the `_deserialize_from_proto` + method. + + By default, only direct instances of the registered class will be saved/ + restored with the `serialize_from_proto`/`deserialize_from_proto` methods. To + extend the registration to subclasses, use the `predicate` argument: + + ```python + class A(tf.Module): + pass + + register_serializable( + package="Example", predicate=lambda obj: isinstance(obj, A))(A) + ``` + + Args: + package: The package that this class belongs to. + name: The name to serialize this class under in this package. If None, the + class's name will be used. + predicate: An optional function that takes a single Trackable argument, and + determines whether that object should be serialized with this `package` + and `name`. The default predicate checks whether the object's type exactly + matches the registered class. Predicates are executed in the reverse order + that they are added (later registrations are checked first). + + Returns: + A decorator that registers the decorated class with the passed names and + predicate. + """ + def decorator(arg): + """Registers a class with the serialization framework.""" + nonlocal predicate + if not tf_inspect.isclass(arg): + raise TypeError("Registered serializable must be a class: {}".format(arg)) + + class_name = name if name is not None else arg.__name__ + if predicate is None: + predicate = lambda x: isinstance(x, arg) + _class_registry.register(package, class_name, predicate, arg) + return arg + + return decorator + + +RegisteredSaver = collections.namedtuple( + "RegisteredSaver", ["name", "predicate", "save_fn", "restore_fn"]) +_REGISTERED_SAVERS = {} +_REGISTERED_SAVER_NAMES = []  # Stores names in the order of registration + + +def register_checkpoint_saver(package="Custom", + name=None, + predicate=None, + save_fn=None, + restore_fn=None, + strict_predicate_restore=True): + """Registers functions that checkpoint & restore objects with custom steps. + + If you have a class that requires complicated coordination between multiple + objects when checkpointing, then you will need to register a custom saver + and restore function. An example of this is a custom Variable class that + splits the variable across different objects and devices, and needs to write + checkpoints that are compatible with different configurations of devices. + + The registered save and restore functions are used in checkpoints and + SavedModel. + + Please make sure you are familiar with the concepts in the [Checkpointing + guide](https://www.tensorflow.org/guide/checkpoint), and ops used to save the + V2 checkpoint format: + + * io_ops.SaveV2 + * io_ops.MergeV2Checkpoints + * io_ops.RestoreV2 + + **Predicate** + + The predicate is a filter that will run on every `Trackable` object connected + to the root object. This function determines whether a `Trackable` should use + the registered functions. + + Example: `lambda x: isinstance(x, CustomClass)` + + **Custom save function** + + This is how checkpoint saving works normally: + 1. Gather all of the Trackables with saveable values. + 2. For each Trackable, gather all of the saveable tensors. + 3. Save checkpoint shards (grouping tensors by device) with SaveV2. + 4. Merge the shards with MergeV2Checkpoints.
This combines all of the shards' + metadata and renames them to follow the standard shard pattern. + + When a saver is registered, Trackables that pass the registered `predicate` + are automatically marked as having saveable values. Next, the custom save + function replaces steps 2 and 3 of the saving process. Finally, the shards + returned by the custom save function are merged with the other shards. + + The save function takes in a dictionary of `Trackables` and a `file_prefix` + string. The function should save checkpoint shards using the SaveV2 op, and + return the list of shard prefixes. SaveV2 is currently required for this to + work correctly, because the code merges all of the returned shards, and the + `restore_fn` will only be given the prefix of the merged checkpoint. If you + need to be able to save and restore from unmerged shards, please file a + feature request. + + Specification and example of the save function: + + ``` + def save_fn(trackables, file_prefix): + # trackables: A dictionary mapping unique string identifiers to trackables + # file_prefix: A unique file prefix generated using the registered name. + ... + # Gather the tensors to save. + ... + io_ops.SaveV2(file_prefix, tensor_names, shapes_and_slices, tensors) + return file_prefix  # Returns a tensor or a list of string tensors + ``` + + The save function is executed before the unregistered save ops. + + **Custom restore function** + + Normal checkpoint restore behavior: + 1. Gather all of the Trackables that have saveable values. + 2. For each Trackable, get the names of the desired tensors to extract from + the checkpoint. + 3. Use RestoreV2 to read the saved values, and pass the restored tensors to + the corresponding Trackables. + + The custom restore function replaces steps 2 and 3. + + The restore function also takes a dictionary of `Trackables` and a + `merged_prefix` string. The `merged_prefix` is different from the + `file_prefix`, since it contains the renamed shard paths. To read from the + merged checkpoint, you must use `RestoreV2(merged_prefix, ...)`. + + Specification: + + ``` + def restore_fn(trackables, merged_prefix): + # trackables: A dictionary mapping unique string identifiers to Trackables + # merged_prefix: File prefix of the merged shard names. + + restored_tensors = io_ops.restore_v2( + merged_prefix, tensor_names, shapes_and_slices, dtypes) + ... + # Restore the checkpoint values for the given Trackables. + ``` + + The restore function is executed after the non-registered restore ops. + + Args: + package: Optional, the package that this class belongs to. + name: (Required) The name of this saver, which is saved to the checkpoint. + When a checkpoint is restored, the name and package are used to find + the matching restore function. The name and package are also used to + generate a unique file prefix that is passed to the save_fn. + predicate: (Required) A function that returns a boolean indicating whether a + `Trackable` object should be checkpointed with this function. Predicates + are executed in the reverse order that they are added (later registrations + are checked first). + save_fn: (Required) A function that takes a dictionary of trackables and a + file prefix as the arguments, writes the checkpoint shards for the given + Trackables, and returns the list of shard prefixes. + restore_fn: (Required) A function that takes a dictionary of trackables and + a file prefix as the arguments and restores the trackable values.
+ strict_predicate_restore: If this is `True` (default), then an error will be + raised if the predicate fails during checkpoint restoration. If this is + `False`, checkpoint restoration will skip running the restore function. + This value is generally set to `False` when the predicate does not pass on + the Trackables after being saved/loaded from SavedModel. + + Raises: + ValueError: if the package and name are already registered. + """ + if not callable(save_fn): + raise TypeError(f"The save_fn must be callable, got: {type(save_fn)}") + if not callable(restore_fn): + raise TypeError(f"The restore_fn must be callable, got: {type(restore_fn)}") + + _saver_registry.register(package, name, predicate, (save_fn, restore_fn, + strict_predicate_restore)) + + +def get_registered_saver_name(trackable): + """Returns the name of the registered saver to use with Trackable.""" + try: + return _saver_registry.get_registered_name(trackable) + except LookupError: + return None + + +def get_save_function(registered_name): + """Returns save function registered to name.""" + return _saver_registry.name_lookup(registered_name)[0] + + +def get_restore_function(registered_name): + """Returns restore function registered to name.""" + return _saver_registry.name_lookup(registered_name)[1] + + +def get_strict_predicate_restore(registered_name): + """Returns the `strict_predicate_restore` value of the registered saver.""" + try: + return _saver_registry.name_lookup(registered_name)[2] + except LookupError: + logging.warning( + "Registered saver %s was not found when restoring checkpoints.", + registered_name, + ) + return False  # Return false as the default if the name isn't registered. + + +def validate_restore_function(trackable, registered_name): + """Validates whether the trackable can be restored with the saver. + + When using a checkpoint saved with a registered saver, that same saver must + also be registered when loading. The name of that saver is saved to the + checkpoint and set in the `registered_name` arg. + + Args: + trackable: A `Trackable` object. + registered_name: String name of the expected registered saver. This argument + should be set using the name saved in a checkpoint. + + Raises: + ValueError if the saver could not be found, or if the predicate associated + with the saver does not pass. + """ + try: + _saver_registry.name_lookup(registered_name) + except LookupError: + raise ValueError( + f"Error when restoring object {trackable} from checkpoint. This " + "object was saved using a registered saver named " + f"'{registered_name}', but this saver cannot be found in the " + "current context.") + if not _saver_registry.get_predicate(registered_name)(trackable): + raise ValueError( + f"Object {trackable} was saved with the registered saver named " + f"'{registered_name}'. However, this saver cannot be used to restore the " + "object because the predicate does not pass.")
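Reviewer note on the grappler files added above (`cluster.py`/`item.py`/`tf_optimizer.py`): the diff itself carries no usage example, so here is a minimal sketch, not part of the patch, of how `tf_optimizer.OptimizeGraph` is typically driven. It builds a trivial graph, marks the fetch via the `train_op` collection (which Grappler's item builder reads to decide what must be preserved), exports a MetaGraphDef, and optimizes. The constant names are illustrative only, and the internal module paths are assumed to be importable exactly as vendored here.

```python
import tensorflow as tf
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer

with tf.Graph().as_default() as g:
  a = tf.constant([[1.0, 2.0]], name="a")
  b = tf.constant([[3.0], [4.0]], name="b")
  c = tf.matmul(a, b, name="c")
  # Grappler reads the "train_op" collection to find fetch nodes that the
  # optimization passes must preserve.
  tf.compat.v1.add_to_collection("train_op", c)
  metagraph = tf.compat.v1.train.export_meta_graph(graph=g)

config = config_pb2.ConfigProto()
# Without an explicit cluster, OptimizeGraph provisions a temporary one
# under _OPTIMIZE_GRAPH_CLUSTER_LOCK and shuts it down afterwards.
optimized = tf_optimizer.OptimizeGraph(config, metagraph, verbose=False)

# Equivalently, manage the cluster explicitly; Provision() shuts the
# cluster down when the context exits.
with gcluster.Provision() as c:
  optimized = tf_optimizer.OptimizeGraph(config, metagraph, cluster=c)
```

`optimized` comes back as a plain `GraphDef` (the `is_oss` path deserializes it with `graph_pb2.GraphDef.FromString`), so the result of constant folding the matmul can be inspected directly via `optimized.node`.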
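Similarly, the `register_checkpoint_saver` docstring above specifies the `save_fn`/`restore_fn` contract only in prose fragments. The following hypothetical sketch ties the pieces together: a toy module whose two variables are checkpointed as one stacked tensor. The class `Stacked`, the helper names, and the `/stacked` key suffix are invented for illustration; `tf.raw_ops.SaveV2` and `tf.raw_ops.RestoreV2` stand in for the `io_ops` calls the docstring mentions, and the registration goes through the `register_tf_checkpoint_saver` convenience wrapper from the `registration/__init__.py` added in this diff.

```python
import tensorflow as tf
from tensorflow.python.saved_model import registration


class Stacked(tf.Module):
  """Toy module whose two variables are saved as a single stacked tensor."""

  def __init__(self):
    self.part_a = tf.Variable([1.0, 2.0])
    self.part_b = tf.Variable([3.0, 4.0])


def _save_fn(trackables, file_prefix):
  # trackables maps unique string identifiers to Stacked instances.
  names, slices, tensors = [], [], []
  for key, obj in trackables.items():
    names.append(key + "/stacked")  # invented checkpoint key suffix
    slices.append("")
    tensors.append(tf.stack([obj.part_a, obj.part_b]))
  tf.raw_ops.SaveV2(prefix=file_prefix, tensor_names=names,
                    shape_and_slices=slices, tensors=tensors)
  return file_prefix  # shard prefix, merged by the checkpoint machinery


def _restore_fn(trackables, merged_prefix):
  # merged_prefix points at the already-merged checkpoint shards.
  for key, obj in trackables.items():
    stacked, = tf.raw_ops.RestoreV2(
        prefix=merged_prefix, tensor_names=[key + "/stacked"],
        shape_and_slices=[""], dtypes=[tf.float32])
    obj.part_a.assign(stacked[0])
    obj.part_b.assign(stacked[1])


registration.register_tf_checkpoint_saver(
    name="StackedSaver",
    predicate=lambda x: isinstance(x, Stacked),
    save_fn=_save_fn,
    restore_fn=_restore_fn)
```

After registration, saving a `tf.train.Checkpoint(obj=Stacked())` would route `obj` through `_save_fn` instead of the default per-variable saving, and restoration would invoke `_restore_fn` with the merged prefix, matching the save/restore contract documented above.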