ZTWHHH commited on
Commit
2b2cee7
·
verified ·
1 Parent(s): c6921fd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. llava_next/share/terminfo/m/mac +0 -0
  3. llava_next/share/terminfo/m/mach-color +0 -0
  4. llava_next/share/terminfo/m/mai +0 -0
  5. llava_next/share/terminfo/m/microb +0 -0
  6. llava_next/share/terminfo/m/mime +0 -0
  7. llava_next/share/terminfo/m/mime-hb +0 -0
  8. llava_next/share/terminfo/m/mime2 +0 -0
  9. llava_next/share/terminfo/m/mime314 +0 -0
  10. llava_next/share/terminfo/m/minitel1 +0 -0
  11. llava_next/share/terminfo/m/mintty +0 -0
  12. llava_next/share/terminfo/m/mod24 +0 -0
  13. llava_next/share/terminfo/m/morphos +0 -0
  14. llava_next/share/terminfo/m/mosh-256color +0 -0
  15. llava_next/share/terminfo/m/mrxvt +0 -0
  16. llava_next/share/terminfo/m/ms-vt100 +0 -0
  17. llava_next/share/terminfo/m/ms-vt100-color +0 -0
  18. llava_next/share/terminfo/m/msk227am +0 -0
  19. llava_next/share/terminfo/m/mskermit227 +0 -0
  20. llava_next/share/terminfo/m/mskermit227am +0 -0
  21. llava_next/share/terminfo/m/mt70 +0 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/__init__.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/compat.cpython-310.pyc +0 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/python/data/__init__.py +33 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__init__.py +172 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__init__.py +0 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__pycache__/__init__.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__init__.py +0 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/__init__.cpython-310.pyc +0 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/multi_process_cluster.cpython-310.pyc +0 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/test_base.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py +165 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/test_base.py +456 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__init__.py +0 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/batching.py +379 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/cardinality.py +113 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/compression_ops.py +51 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/counter.py +84 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distribute.py +399 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distributed_save_op.py +61 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/enumerate_ops.py +54 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/from_list.py +119 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/interleave_ops.py +261 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/io.py +166 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/iterator_ops.py +97 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/pad_to_cardinality.py +105 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/parsing_ops.py +161 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/prefetching_ops.py +287 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_access.py +73 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_ops.py +58 -0
.gitattributes CHANGED
@@ -881,3 +881,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/sparse
881
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
882
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
883
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
881
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_training_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
882
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
883
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
884
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/share/terminfo/m/mac ADDED
Binary file (629 Bytes). View file
 
llava_next/share/terminfo/m/mach-color ADDED
Binary file (1.11 kB). View file
 
llava_next/share/terminfo/m/mai ADDED
Binary file (857 Bytes). View file
 
llava_next/share/terminfo/m/microb ADDED
Binary file (475 Bytes). View file
 
llava_next/share/terminfo/m/mime ADDED
Binary file (493 Bytes). View file
 
llava_next/share/terminfo/m/mime-hb ADDED
Binary file (476 Bytes). View file
 
llava_next/share/terminfo/m/mime2 ADDED
Binary file (493 Bytes). View file
 
llava_next/share/terminfo/m/mime314 ADDED
Binary file (360 Bytes). View file
 
llava_next/share/terminfo/m/minitel1 ADDED
Binary file (1.68 kB). View file
 
llava_next/share/terminfo/m/mintty ADDED
Binary file (3.88 kB). View file
 
llava_next/share/terminfo/m/mod24 ADDED
Binary file (1.14 kB). View file
 
llava_next/share/terminfo/m/morphos ADDED
Binary file (836 Bytes). View file
 
llava_next/share/terminfo/m/mosh-256color ADDED
Binary file (3.53 kB). View file
 
llava_next/share/terminfo/m/mrxvt ADDED
Binary file (3.04 kB). View file
 
llava_next/share/terminfo/m/ms-vt100 ADDED
Binary file (1.21 kB). View file
 
llava_next/share/terminfo/m/ms-vt100-color ADDED
Binary file (1.42 kB). View file
 
llava_next/share/terminfo/m/msk227am ADDED
Binary file (554 Bytes). View file
 
llava_next/share/terminfo/m/mskermit227 ADDED
Binary file (531 Bytes). View file
 
llava_next/share/terminfo/m/mskermit227am ADDED
Binary file (554 Bytes). View file
 
llava_next/share/terminfo/m/mt70 ADDED
Binary file (842 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/compat/__pycache__/compat.cpython-310.pyc ADDED
Binary file (5.19 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """`tf.data.Dataset` API for input pipelines.
16
+
17
+ See [Importing Data](https://tensorflow.org/guide/data) for an overview.
18
+ """
19
+
20
+ # pylint: disable=unused-import
21
+ from tensorflow.python.data import experimental
22
+ from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
23
+ from tensorflow.python.data.ops.dataset_ops import Dataset
24
+ from tensorflow.python.data.ops.dataset_ops import INFINITE as INFINITE_CARDINALITY
25
+ from tensorflow.python.data.ops.dataset_ops import make_initializable_iterator
26
+ from tensorflow.python.data.ops.dataset_ops import make_one_shot_iterator
27
+ from tensorflow.python.data.ops.dataset_ops import UNKNOWN as UNKNOWN_CARDINALITY
28
+ from tensorflow.python.data.ops.iterator_ops import Iterator
29
+ from tensorflow.python.data.ops.options import Options
30
+ from tensorflow.python.data.ops.readers import FixedLengthRecordDataset
31
+ from tensorflow.python.data.ops.readers import TextLineDataset
32
+ from tensorflow.python.data.ops.readers import TFRecordDataset
33
+ # pylint: enable=unused-import
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__init__.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Experimental API for building input pipelines.
16
+
17
+ This module contains experimental `Dataset` sources and transformations that can
18
+ be used in conjunction with the `tf.data.Dataset` API. Note that the
19
+ `tf.data.experimental` API is not subject to the same backwards compatibility
20
+ guarantees as `tf.data`, but we will provide deprecation advice in advance of
21
+ removing existing functionality.
22
+
23
+ See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
24
+
25
+ @@AutoShardPolicy
26
+ @@AutotuneAlgorithm
27
+ @@AutotuneOptions
28
+ @@Counter
29
+ @@CsvDataset
30
+ @@DatasetInitializer
31
+ @@DatasetStructure
32
+ @@DistributeOptions
33
+ @@ExternalStatePolicy
34
+ @@OptimizationOptions
35
+ @@Optional
36
+ @@OptionalStructure
37
+ @@RaggedTensorStructure
38
+ @@RandomDataset
39
+ @@Reducer
40
+ @@SparseTensorStructure
41
+ @@SqlDataset
42
+ @@Structure
43
+ @@TFRecordWriter
44
+ @@TensorArrayStructure
45
+ @@TensorStructure
46
+ @@ThreadingOptions
47
+
48
+ @@assert_cardinality
49
+ @@at
50
+ @@bucket_by_sequence_length
51
+ @@cardinality
52
+ @@choose_from_datasets
53
+ @@copy_to_device
54
+ @@dense_to_ragged_batch
55
+ @@dense_to_sparse_batch
56
+ @@distribute
57
+ @@enable_debug_mode
58
+ @@enumerate_dataset
59
+ @@from_list
60
+ @@from_variant
61
+ @@get_next_as_optional
62
+ @@get_single_element
63
+ @@get_structure
64
+ @@group_by_reducer
65
+ @@group_by_window
66
+ @@ignore_errors
67
+ @@index_table_from_dataset
68
+ @@load
69
+ @@make_batched_features_dataset
70
+ @@make_csv_dataset
71
+ @@make_saveable_from_iterator
72
+ @@map_and_batch
73
+ @@map_and_batch_with_legacy_function
74
+ @@pad_to_cardinality
75
+ @@parallel_interleave
76
+ @@parse_example_dataset
77
+ @@prefetch_to_device
78
+ @@rejection_resample
79
+ @@sample_from_datasets
80
+ @@save
81
+ @@scan
82
+ @@shuffle_and_repeat
83
+ @@snapshot
84
+ @@table_from_dataset
85
+ @@take_while
86
+ @@to_variant
87
+ @@unbatch
88
+ @@unique
89
+
90
+ @@AUTOTUNE
91
+ @@INFINITE_CARDINALITY
92
+ @@SHARD_HINT
93
+ @@UNKNOWN_CARDINALITY
94
+ """
95
+
96
+ # pylint: disable=unused-import
97
+ from tensorflow.python.data.experimental import service
98
+ from tensorflow.python.data.experimental.ops.batching import dense_to_ragged_batch
99
+ from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
100
+ from tensorflow.python.data.experimental.ops.batching import map_and_batch
101
+ from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
102
+ from tensorflow.python.data.experimental.ops.batching import unbatch
103
+ from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality
104
+ from tensorflow.python.data.experimental.ops.cardinality import cardinality
105
+ from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
106
+ from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
107
+ from tensorflow.python.data.experimental.ops.counter import Counter
108
+ from tensorflow.python.data.experimental.ops.distribute import SHARD_HINT
109
+ from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
110
+ from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
111
+ from tensorflow.python.data.experimental.ops.from_list import from_list
112
+ from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
113
+ from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
114
+ from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
115
+ from tensorflow.python.data.experimental.ops.grouping import group_by_window
116
+ from tensorflow.python.data.experimental.ops.grouping import Reducer
117
+ from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
118
+ from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
119
+ from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
120
+ from tensorflow.python.data.experimental.ops.io import load
121
+ from tensorflow.python.data.experimental.ops.io import save
122
+ from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
123
+ from tensorflow.python.data.experimental.ops.lookup_ops import DatasetInitializer
124
+ from tensorflow.python.data.experimental.ops.lookup_ops import index_table_from_dataset
125
+ from tensorflow.python.data.experimental.ops.lookup_ops import table_from_dataset
126
+ from tensorflow.python.data.experimental.ops.pad_to_cardinality import pad_to_cardinality
127
+ from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
128
+ from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
129
+ from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
130
+ from tensorflow.python.data.experimental.ops.random_access import at
131
+ from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
132
+ from tensorflow.python.data.experimental.ops.readers import CsvDataset
133
+ from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
134
+ from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
135
+ from tensorflow.python.data.experimental.ops.readers import SqlDataset
136
+ from tensorflow.python.data.experimental.ops.resampling import rejection_resample
137
+ from tensorflow.python.data.experimental.ops.scan_ops import scan
138
+ from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
139
+ from tensorflow.python.data.experimental.ops.snapshot import snapshot
140
+ from tensorflow.python.data.experimental.ops.take_while_ops import take_while
141
+ from tensorflow.python.data.experimental.ops.unique import unique
142
+ from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
143
+ from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
144
+ from tensorflow.python.data.ops.dataset_ops import DatasetSpec as DatasetStructure
145
+ from tensorflow.python.data.ops.dataset_ops import from_variant
146
+ from tensorflow.python.data.ops.dataset_ops import get_structure
147
+ from tensorflow.python.data.ops.dataset_ops import to_variant
148
+ from tensorflow.python.data.ops.debug_mode import enable_debug_mode
149
+ from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
150
+ from tensorflow.python.data.ops.optional_ops import Optional
151
+ from tensorflow.python.data.ops.optional_ops import OptionalSpec as OptionalStructure
152
+ from tensorflow.python.data.ops.options import AutoShardPolicy
153
+ from tensorflow.python.data.ops.options import AutotuneAlgorithm
154
+ from tensorflow.python.data.ops.options import AutotuneOptions
155
+ from tensorflow.python.data.ops.options import DistributeOptions
156
+ from tensorflow.python.data.ops.options import ExternalStatePolicy
157
+ from tensorflow.python.data.ops.options import OptimizationOptions
158
+ from tensorflow.python.data.ops.options import ThreadingOptions
159
+ from tensorflow.python.data.util.structure import _RaggedTensorStructure as RaggedTensorStructure
160
+ from tensorflow.python.data.util.structure import _SparseTensorStructure as SparseTensorStructure
161
+ from tensorflow.python.data.util.structure import _TensorArrayStructure as TensorArrayStructure
162
+ from tensorflow.python.data.util.structure import _TensorStructure as TensorStructure
163
+ from tensorflow.python.framework.type_spec import TypeSpec as Structure
164
+ # pylint: enable=unused-import
165
+
166
+ from tensorflow.python.util.all_util import remove_undocumented
167
+
168
+ _allowed_symbols = [
169
+ "service",
170
+ ]
171
+
172
+ remove_undocumented(__name__, _allowed_symbols)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (212 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/multi_process_cluster.cpython-310.pyc ADDED
Binary file (6.45 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """tf.data service test-cluster with local and remote workers."""
16
+
17
+ import tempfile
18
+
19
+ from tensorflow.core.protobuf import data_service_pb2
20
+ from tensorflow.core.protobuf import service_config_pb2
21
+ from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
22
+ from tensorflow.python.data.experimental.service import server_lib
23
+ from tensorflow.python.distribute import multi_process_lib
24
+ from tensorflow.python.framework import test_util
25
+ from tensorflow.python.platform import googletest
26
+
27
+ _WORKER_SHUTDOWN_QUIET_PERIOD_MS = 100
28
+
29
+
30
+ # pylint: disable=protected-access
31
+ class _RemoteWorkerProcess(multi_process_lib.Process):
32
+ """Runs a worker server in a new process to simulate a remote worker."""
33
+
34
+ def __init__(self, dispatcher_address, port, worker_tags, pipe_writer):
35
+ super(_RemoteWorkerProcess, self).__init__()
36
+ self._dispatcher_address = dispatcher_address
37
+ self._port = port
38
+ self._worker_tags = worker_tags
39
+ self._pipe_writer = pipe_writer
40
+
41
+ def run(self):
42
+ self.start_worker()
43
+
44
+ def start_worker(self):
45
+ self._worker = data_service_test_base.TestWorker(
46
+ self._dispatcher_address,
47
+ _WORKER_SHUTDOWN_QUIET_PERIOD_MS,
48
+ port=self._port,
49
+ worker_tags=self._worker_tags)
50
+ self._worker.start()
51
+ self._pipe_writer.send(self._worker.worker_address())
52
+ self._worker.join()
53
+
54
+
55
+ class MultiProcessCluster:
56
+ """tf.data service cluster with local and remote workers.
57
+
58
+ Represents a cluster with a dispatcher, `num_local_workers` local workers, and
59
+ `num_remote_workers` remote workers. Remote workers run in separate processes.
60
+ This is useful to test reading from local in-process workers. For example:
61
+
62
+ ```
63
+ cluster = multi_process_cluster.MultiProcessCluster(
64
+ num_local_workers=1, num_remote_workers=3)
65
+ num_elements = 10
66
+ dataset = self.make_distributed_range_dataset(
67
+ num_elements, cluster, target_workers="LOCAL")
68
+ self.assertDatasetProduces(dataset, list(range(num_elements)))
69
+ ```
70
+ """
71
+
72
+ def __init__(self,
73
+ num_local_workers,
74
+ num_remote_workers,
75
+ worker_tags=None,
76
+ worker_addresses=None,
77
+ deployment_mode=data_service_pb2.DEPLOYMENT_MODE_COLOCATED):
78
+ self._work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
79
+ self._deployment_mode = deployment_mode
80
+ self._start_dispatcher(worker_addresses)
81
+ self._start_local_workers(num_local_workers, worker_tags)
82
+ self._start_remote_workers(num_remote_workers, worker_tags)
83
+
84
+ def _start_dispatcher(self, worker_addresses, port=0):
85
+ if port == 0:
86
+ port = test_util.pick_unused_port()
87
+ self._dispatcher = server_lib.DispatchServer(
88
+ service_config_pb2.DispatcherConfig(
89
+ port=port,
90
+ protocol="grpc",
91
+ work_dir=self._work_dir,
92
+ fault_tolerant_mode=True,
93
+ worker_addresses=worker_addresses,
94
+ deployment_mode=self._deployment_mode),
95
+ start=True)
96
+
97
+ def _start_local_workers(self, num_workers, worker_tags=None):
98
+ self._local_workers = []
99
+ for _ in range(num_workers):
100
+ self.start_local_worker(worker_tags)
101
+
102
+ def _start_remote_workers(self, num_workers, worker_tags=None):
103
+ # List of (worker address, remote worker process) tuples.
104
+ self._remote_workers = []
105
+ for _ in range(num_workers):
106
+ self.start_remote_worker(worker_tags)
107
+
108
+ def start_local_worker(self, worker_tags=None):
109
+ worker = data_service_test_base.TestWorker(
110
+ self.dispatcher_address(),
111
+ _WORKER_SHUTDOWN_QUIET_PERIOD_MS,
112
+ port=test_util.pick_unused_port(),
113
+ worker_tags=worker_tags)
114
+ worker.start()
115
+ self._local_workers.append(worker)
116
+
117
+ def start_remote_worker(self, worker_tags=None):
118
+ """Runs a tf.data service worker in a remote process."""
119
+
120
+ pipe_reader, pipe_writer = multi_process_lib.multiprocessing.Pipe(
121
+ duplex=False)
122
+ worker_process = _RemoteWorkerProcess(
123
+ self.dispatcher_address(),
124
+ port=test_util.pick_unused_port(),
125
+ worker_tags=worker_tags,
126
+ pipe_writer=pipe_writer)
127
+ worker_process.start()
128
+ worker_address = pipe_reader.recv()
129
+ self._remote_workers.append((worker_address, worker_process))
130
+
131
+ def restart_dispatcher(self):
132
+ port = int(self.dispatcher_address().split(":")[1])
133
+ self._dispatcher._stop()
134
+ self._start_dispatcher(
135
+ worker_addresses=(self.local_worker_addresses() +
136
+ self.remote_worker_addresses()),
137
+ port=port)
138
+
139
+ def restart_local_workers(self):
140
+ for worker in self._local_workers:
141
+ worker.restart()
142
+
143
+ def dispatcher_address(self):
144
+ return self._dispatcher._address
145
+
146
+ def local_worker_addresses(self):
147
+ return [worker.worker_address() for worker in self._local_workers]
148
+
149
+ def remote_worker_addresses(self):
150
+ return [worker_address for (worker_address, _) in self._remote_workers]
151
+
152
+ def _stop(self):
153
+ for worker in self._local_workers:
154
+ worker.stop()
155
+ for (_, worker_process) in self._remote_workers:
156
+ worker_process.kill()
157
+ self._dispatcher._stop()
158
+
159
+ def __del__(self):
160
+ self._stop()
161
+
162
+
163
+ def test_main():
164
+ """Main function to be called within `__main__` of a test file."""
165
+ multi_process_lib.test_main()
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/kernel_tests/service/test_base.py ADDED
@@ -0,0 +1,456 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Test base for tf.data service tests."""
16
+
17
+ import os
18
+ import shutil
19
+ import tempfile
20
+
21
+ from tensorflow.core.protobuf import service_config_pb2
22
+ from tensorflow.python.data.experimental.ops import data_service_ops
23
+ from tensorflow.python.data.experimental.service import server_lib
24
+ from tensorflow.python.data.kernel_tests import test_base
25
+ from tensorflow.python.data.ops import dataset_ops
26
+ from tensorflow.python.framework import combinations
27
+ from tensorflow.python.framework import dtypes
28
+ from tensorflow.python.ops import math_ops
29
+ from tensorflow.python.platform import googletest
30
+
31
+ # This will be resolved to a tmp directory by `start_dispatch_server`.
32
+ TMP_WORK_DIR = "tmp_work_dir_placeholder"
33
+ # `""` indicates not to use a work directory.
34
+ NO_WORK_DIR = ""
35
+ # We use a faster than normal heartbeat interval so that tests run faster.
36
+ TEST_HEARTBEAT_INTERVAL_MS = 100
37
+ TEST_DISPATCHER_TIMEOUT_MS = 5000
38
+ TEST_WORKER_TIMEOUT_MS = 200
39
+ TEST_JOB_GC_CHECK_INTERNAL_MS = 1000
40
+ TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES = 16 << 10 # 16 KB
41
+ PROTOCOL = "grpc"
42
+
43
+
44
+ def all_cluster_configurations():
45
+ with_work_dir = combinations.combine(
46
+ work_dir=TMP_WORK_DIR, fault_tolerant_mode=[True, False])
47
+ without_work_dir = combinations.combine(
48
+ work_dir=NO_WORK_DIR, fault_tolerant_mode=False)
49
+ return with_work_dir + without_work_dir
50
+
51
+
52
+ def _make_worker(
53
+ dispatcher_address,
54
+ protocol,
55
+ data_transfer_protocol,
56
+ shutdown_quiet_period_ms=0,
57
+ port=0,
58
+ worker_tags=None,
59
+ cross_trainer_cache_size_bytes=None,
60
+ snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES,
61
+ ):
62
+ """Creates a worker server."""
63
+ defaults = server_lib.WorkerConfig(dispatcher_address=dispatcher_address)
64
+ config_proto = service_config_pb2.WorkerConfig(
65
+ dispatcher_address=dispatcher_address,
66
+ worker_address=defaults.worker_address,
67
+ port=port,
68
+ protocol=protocol,
69
+ worker_tags=worker_tags,
70
+ heartbeat_interval_ms=TEST_HEARTBEAT_INTERVAL_MS,
71
+ dispatcher_timeout_ms=TEST_DISPATCHER_TIMEOUT_MS,
72
+ data_transfer_protocol=data_transfer_protocol,
73
+ data_transfer_address=defaults.worker_address,
74
+ shutdown_quiet_period_ms=shutdown_quiet_period_ms,
75
+ cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes,
76
+ snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes,
77
+ )
78
+ return server_lib.WorkerServer(config_proto, start=False)
79
+
80
+
81
# pylint: disable=protected-access
class TestWorker:
  """A tf.data service worker."""

  def __init__(
      self,
      dispatcher_address,
      shutdown_quiet_period_ms,
      protocol=PROTOCOL,
      data_transfer_protocol=None,
      port=0,
      worker_tags=None,
      cross_trainer_cache_size_bytes=None,
      snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES,
  ):
    # Remember enough of the configuration to rebuild the server on `restart`.
    self._dispatcher_address = dispatcher_address
    self._shutdown_quiet_period_ms = shutdown_quiet_period_ms
    self._protocol = protocol
    self._data_transfer_protocol = data_transfer_protocol
    self._running = False
    self._server = _make_worker(
        dispatcher_address,
        protocol,
        data_transfer_protocol,
        shutdown_quiet_period_ms,
        port=port,
        worker_tags=worker_tags,
        cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes,
        snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes,
    )

  def _mark_started(self):
    # Cache the bound port so that a later restart can reuse it.
    self._port = int(self._server._address.split(":")[1])
    self._running = True

  def stop(self):
    self._server._stop()
    self._running = False

  def start(self):
    self._server.start()
    self._mark_started()

  def restart(self, use_same_port=True):
    """Restarts the worker, stopping it first if it is already running."""
    if self._running:
      self.stop()
    self._server = _make_worker(
        self._dispatcher_address,
        self._protocol,
        self._data_transfer_protocol,
        self._shutdown_quiet_period_ms,
        self._port if use_same_port else 0,
    )
    self._server.start()
    self._mark_started()

  def join(self):
    self._server.join()

  def num_tasks(self):
    return self._server._num_tasks()

  def snapshot_task_progresses(self):
    return self._server._snapshot_task_progresses()

  def worker_address(self):
    return self._server._address
147
+
148
+
149
class TestCluster:
  """Test tf.data service cluster."""

  def __init__(
      self,
      num_workers,
      dispatcher_port=0,
      work_dir=TMP_WORK_DIR,
      fault_tolerant_mode=True,
      job_gc_check_interval_ms=TEST_JOB_GC_CHECK_INTERNAL_MS,
      job_gc_timeout_ms=None,
      worker_timeout_ms=TEST_WORKER_TIMEOUT_MS,
      worker_shutdown_quiet_period_ms=0,
      snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES,
      worker_max_concurrent_snapshots=0,
      start=True,
      protocol=PROTOCOL,
      data_transfer_protocol=None,
  ):
    """Creates a tf.data service test cluster.

    Args:
      num_workers: The number of workers to initially add to the cluster.
      dispatcher_port: The port to use for the dispatcher.
      work_dir: The work directory to use for the dispatcher. If set to
        `TMP_WORK_DIR`, the cluster will create a new temporary directory to use
        as the work directory. If set to `NO_WORK_DIR`, no work directory will
        be used.
      fault_tolerant_mode: Whether the dispatcher should write its state to a
        journal so that it can recover from restarts.
      job_gc_check_interval_ms: How often the dispatcher should scan through to
        delete old and unused jobs, in milliseconds.
      job_gc_timeout_ms: How long a job needs to be unused before it becomes a
        candidate for garbage collection, in milliseconds.
      worker_timeout_ms: How long to wait for a worker to heartbeat before
        considering it missing, in milliseconds.
      worker_shutdown_quiet_period_ms: When shutting down a worker, how long to
        wait for the gRPC server to process the final requests.
      snapshot_max_chunk_size_bytes: The maximum size of a distributed snapshot
        chunk file.
      worker_max_concurrent_snapshots: The maximum number of snapshots a worker
        can concurrently process.
      start: Whether to immediately start the servers in the cluster. If
        `False`, the servers can be started later by calling
        `start_dispatcher()` and `start_workers()`.
      protocol: The protocol to use for communicating with the tf.data service,
        e.g. "grpc".
      data_transfer_protocol: (Optional.) The protocol to use for transferring
        data with the tf.data service.
    """
    if work_dir == TMP_WORK_DIR:
      work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
    # Stash the cluster-wide settings first; `_make_dispatcher` and
    # `add_worker` read them back off `self`.
    self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms
    self._snapshot_max_chunk_size_bytes = snapshot_max_chunk_size_bytes
    self._protocol = protocol
    self._data_transfer_protocol = data_transfer_protocol
    self._job_gc_check_interval_ms = job_gc_check_interval_ms
    self._job_gc_timeout_ms = job_gc_timeout_ms
    self._worker_timeout_ms = worker_timeout_ms
    self._worker_max_concurrent_snapshots = worker_max_concurrent_snapshots
    self.dispatcher = self._make_dispatcher(
        dispatcher_port, work_dir, fault_tolerant_mode, start)

    self.workers = []
    for _ in range(num_workers):
      self.add_worker(start=start)

  def _make_dispatcher(self, port, work_dir, fault_tolerant_mode, start):
    # Builds a dispatch server from the cluster-wide settings stored on `self`.
    return server_lib.DispatchServer(
        server_lib.DispatcherConfig(
            port=port,
            work_dir=work_dir,
            protocol=self._protocol,
            fault_tolerant_mode=fault_tolerant_mode,
            job_gc_check_interval_ms=self._job_gc_check_interval_ms,
            job_gc_timeout_ms=self._job_gc_timeout_ms,
            worker_timeout_ms=self._worker_timeout_ms,
            worker_max_concurrent_snapshots=(
                self._worker_max_concurrent_snapshots),
        ),
        start=start,
    )

  def dispatcher_address(self):
    return self.dispatcher.target.split("://")[1]

  def add_worker(self, start=True):
    worker = TestWorker(
        self.dispatcher_address(),
        self._worker_shutdown_quiet_period_ms,
        self._protocol,
        self._data_transfer_protocol,
        snapshot_max_chunk_size_bytes=self._snapshot_max_chunk_size_bytes,
    )
    if start:
      worker.start()
    self.workers.append(worker)

  def start_dispatcher(self):
    self.dispatcher.start()

  def start_workers(self):
    for worker in self.workers:
      worker.start()

  def stop_dispatcher(self):
    self.dispatcher._stop()  # pylint: disable=protected-access

  def restart_worker(self, index):
    self.workers[index].restart()

  def stop_worker(self, index):
    self.workers[index].stop()

  def stop_workers(self):
    for worker in self.workers:
      worker.stop()

  # pylint: disable=protected-access
  def restart_dispatcher(self):
    """Stops `dispatcher` and creates a new dispatcher with the same port.

    Restarting is supported only when the dispatcher is configured with
    `fault_tolerant_mode=True`.
    """
    if not self.dispatcher._config.fault_tolerant_mode:
      raise ValueError(
          "Trying to restart the dispatcher without fault-tolerance.")
    port = int(self.dispatcher_address().split(":")[1])
    old_config = self.dispatcher._config
    self.dispatcher._stop()
    self.dispatcher = self._make_dispatcher(
        port, old_config.work_dir, old_config.fault_tolerant_mode, start=True)

  def num_registered_workers(self):
    return self.dispatcher._num_workers()

  def num_tasks_on_workers(self):
    return sum(worker.num_tasks() for worker in self.workers)

  def snapshot_streams(self, path):
    return self.dispatcher._snapshot_streams(path)

  def __del__(self):
    # Destroy workers before the dispatcher for clean shutdown.
    self.workers.clear()
    del self.dispatcher
302
+
303
+
304
class TestBase(test_base.DatasetTestBase):
  """Base class for tf.data service tests.

  Provides helpers for creating test clusters and distributed datasets, with
  per-test defaults for the data-transfer protocol and compression.
  """

  def setUp(self):
    # Run the parent fixture first; skipping super().setUp() would leave the
    # DatasetTestBase/unittest machinery uninitialized.
    super().setUp()
    self.default_data_transfer_protocol = None
    self.default_compression = "AUTO"

  def set_default_data_transfer_protocol(self, protocol):
    """Sets the data-transfer protocol used when a test does not override it."""
    self.default_data_transfer_protocol = protocol

  def set_default_compression(self, compression):
    """Sets the compression used when a test does not override it."""
    self.default_compression = compression

  def make_test_cluster(self, *args, **kwargs):
    """Creates a `TestCluster`, filling in the default transfer protocol."""
    if "data_transfer_protocol" not in kwargs:
      kwargs["data_transfer_protocol"] = self.default_data_transfer_protocol
    return TestCluster(*args, **kwargs)

  def make_distributed_dataset(self,
                               dataset,
                               cluster,
                               processing_mode="parallel_epochs",
                               **kwargs):
    """Distributes `dataset` via `cluster`, applying the test defaults."""
    kwargs["task_refresh_interval_hint_ms"] = 20
    if "data_transfer_protocol" not in kwargs:
      kwargs["data_transfer_protocol"] = self.default_data_transfer_protocol
    if "compression" not in kwargs:
      kwargs["compression"] = self.default_compression

    # pylint: disable=protected-access
    return dataset.apply(
        data_service_ops._distribute(
            processing_mode,
            cluster.dispatcher_address(),
            **kwargs))

  def make_distributed_range_dataset(self,
                                     num_elements,
                                     cluster,
                                     **kwargs):
    """Distributes a `range(num_elements)` dataset via `cluster`."""
    dataset = dataset_ops.Dataset.range(num_elements)
    return self.make_distributed_dataset(dataset, cluster, **kwargs)

  def make_coordinated_read_dataset(
      self,
      cluster,
      num_consumers,
      sharding_policy=data_service_ops.ShardingPolicy.OFF):
    """Creates a dataset that performs coordinated reads.

    The dataset simulates `num_consumers` consumers by using parallel
    interleave to read with `num_consumers` threads, one for each consumer. The
    nth element of the dataset is produced by consumer `n % num_consumers`.

    The dataset executed on each worker will produce groups of `num_consumers`
    sequentially increasing numbers. For example, if `num_consumers=3` a worker
    dataset could produce [0, 1, 2, 9, 10, 11, 21, 22, 23]. This enables
    `checkCoordinatedReadGroups` below to assess whether the values received in
    each step came from the same group.

    Args:
      cluster: A tf.data service `TestCluster`.
      num_consumers: The number of consumers to simulate.
      sharding_policy: The sharding policy to use. Currently only OFF and
        DYNAMIC are supported.

    Returns:
      A dataset that simulates reading with `num_consumers` consumers.
    """
    if sharding_policy not in [
        data_service_ops.ShardingPolicy.OFF,
        data_service_ops.ShardingPolicy.DYNAMIC
    ]:
      raise ValueError(f"Unsupported sharding policy: {sharding_policy}")
    # Start from 0 so that we can detect when a new worker is added with
    # ShardingPolicy.OFF.
    ds = dataset_ops.Dataset.from_tensors(math_ops.cast(0, dtypes.int64))
    ds = ds.concatenate(dataset_ops.Dataset.random())
    # Ensure that all elements in the same group are consecutive.
    def make_group(x):
      # Avoid overflowing an int64 in (x+1)*num_consumers below.
      x = x % (2**32)
      return dataset_ops.Dataset.range(x*num_consumers, (x+1)*num_consumers)
    ds = ds.flat_map(make_group)
    consumers = []
    for consumer_index in range(num_consumers):
      consumers.append(
          self.make_distributed_dataset(
              ds,
              cluster,
              job_name="test",
              processing_mode=sharding_policy,
              consumer_index=consumer_index,
              num_consumers=num_consumers))
    # Use parallel interleave to read from consumers in parallel.
    ds = dataset_ops.Dataset.from_tensor_slices(consumers)
    ds = ds.interleave(
        lambda x: x,
        cycle_length=num_consumers,
        num_parallel_calls=num_consumers)
    return ds

  def checkCoordinatedReadGroups(self, results, num_consumers):
    """Validates results from a `make_coordinated_read_dataset` dataset.

    Each group of `num_consumers` results should be consecutive, indicating that
    they were produced by the same worker.

    Args:
      results: The elements produced by the dataset.
      num_consumers: The number of consumers.
    """
    groups = [
        results[start:start + num_consumers]
        for start in range(0, len(results), num_consumers)
    ]
    incorrect_groups = []
    for group in groups:
      # Check that each group of `num_consumers` results are consecutive.
      for offset in range(1, len(group)):
        if group[0] + offset != group[offset]:
          incorrect_groups.append(group)
          break
    self.assertEmpty(
        incorrect_groups,
        "Incorrect groups: {}.\nAll groups: {}".format(incorrect_groups,
                                                       groups))

  def read(self, get_next, results, count):
    """Evaluates `get_next()` `count` times, appending each value to `results`."""
    for _ in range(count):
      results.append(self.evaluate(get_next()))
435
+
436
+
437
class TempDir:
  """Temporary directory for unit testing.

  The directory is deleted when the object is garbage collected.
  """

  def __init__(self):
    # Nest two temporary directories so the snapshot path lives in a
    # directory unique to this object.
    outer = tempfile.mkdtemp(dir=googletest.GetTempDir())
    inner = tempfile.mkdtemp(dir=outer)
    self._path = os.path.join(inner, "tf_data_snapshot")

  @property
  def full_path(self) -> str:
    return self._path

  def __fspath__(self) -> str:
    # Lets the object be passed anywhere an os.PathLike is accepted.
    return self._path

  def __del__(self):
    try:
      shutil.rmtree(self.full_path)
    except FileNotFoundError:
      # Already cleaned up (or never created) — nothing to do.
      pass
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/batching.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Batching dataset transformations."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.data.ops import structured_function
18
+ from tensorflow.python.data.util import convert
19
+ from tensorflow.python.data.util import nest
20
+ from tensorflow.python.framework import dtypes
21
+ from tensorflow.python.framework import ops
22
+ from tensorflow.python.framework import sparse_tensor
23
+ from tensorflow.python.framework import tensor_shape
24
+ from tensorflow.python.framework import tensor_util
25
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
26
+ from tensorflow.python.util import deprecation
27
+ from tensorflow.python.util.tf_export import tf_export
28
+
29
+
30
@tf_export("data.experimental.dense_to_ragged_batch")
@deprecation.deprecated(None, "Use `tf.data.Dataset.ragged_batch` instead.")
def dense_to_ragged_batch(batch_size,
                          drop_remainder=False,
                          row_splits_dtype=dtypes.int64):
  """A transformation that batches ragged elements into `tf.RaggedTensor`s.

  This transformation combines multiple consecutive elements of the input
  dataset into a single element.

  Like `tf.data.Dataset.batch`, the components of the resulting element will
  have an additional outer dimension, which will be `batch_size` (or
  `N % batch_size` for the last element if `batch_size` does not divide the
  number of input elements `N` evenly and `drop_remainder` is `False`). If
  your program depends on the batches having the same outer dimension, you
  should set the `drop_remainder` argument to `True` to prevent the smaller
  batch from being produced.

  Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
  different shapes:

  *  If an input element is a `tf.Tensor` whose static `tf.TensorShape` is
     fully defined, then it is batched as normal.
  *  If an input element is a `tf.Tensor` whose static `tf.TensorShape` contains
     one or more axes with unknown size (i.e., `shape[i]=None`), then the output
     will contain a `tf.RaggedTensor` that is ragged up to any of such
     dimensions.
  *  If an input element is a `tf.RaggedTensor` or any other type, then it is
     batched as normal.

  Example:

  >>> dataset = tf.data.Dataset.from_tensor_slices(np.arange(6))
  >>> dataset = dataset.map(lambda x: tf.range(x))
  >>> dataset.element_spec.shape
  TensorShape([None])
  >>> dataset = dataset.apply(
  ...     tf.data.experimental.dense_to_ragged_batch(batch_size=2))
  >>> for batch in dataset:
  ...   print(batch)
  <tf.RaggedTensor [[], [0]]>
  <tf.RaggedTensor [[0, 1], [0, 1, 2]]>
  <tf.RaggedTensor [[0, 1, 2, 3], [0, 1, 2, 3, 4]]>

  Args:
    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
      consecutive elements of this dataset to combine in a single batch.
    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
      whether the last batch should be dropped in the case it has fewer than
      `batch_size` elements; the default behavior is not to drop the smaller
      batch.
    row_splits_dtype: The dtype that should be used for the `row_splits` of any
      new ragged tensors. Existing `tf.RaggedTensor` elements do not have their
      row_splits dtype changed.

  Returns:
    Dataset: A `Dataset`.
  """
  # Simply forward to the (non-deprecated) Dataset method.
  def _apply_fn(dataset):
    return dataset.ragged_batch(
        batch_size,
        drop_remainder=drop_remainder,
        row_splits_dtype=row_splits_dtype)

  return _apply_fn
92
+
93
+
94
@tf_export("data.experimental.dense_to_sparse_batch")
@deprecation.deprecated(None, "Use `tf.data.Dataset.sparse_batch` instead.")
def dense_to_sparse_batch(batch_size, row_shape):
  """A transformation that batches ragged elements into `tf.sparse.SparseTensor`s.

  Like `Dataset.padded_batch()`, this transformation combines multiple
  consecutive elements of the dataset, which might have different
  shapes, into a single element. The resulting element has three
  components (`indices`, `values`, and `dense_shape`), which
  comprise a `tf.sparse.SparseTensor` that represents the same data. The
  `row_shape` represents the dense shape of each row in the
  resulting `tf.sparse.SparseTensor`, to which the effective batch size is
  prepended. For example:

  ```python
  # NOTE: The following examples use `{ ... }` to represent the
  # contents of a dataset.
  a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }

  a.apply(tf.data.experimental.dense_to_sparse_batch(
      batch_size=2, row_shape=[6])) ==
  {
      ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]],  # indices
       ['a', 'b', 'c', 'a', 'b'],                 # values
       [2, 6]),                                   # dense_shape
      ([[0, 0], [0, 1], [0, 2], [0, 3]],
       ['a', 'b', 'c', 'd'],
       [1, 6])
  }
  ```

  Args:
    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
      consecutive elements of this dataset to combine in a single batch.
    row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
      representing the equivalent dense shape of a row in the resulting
      `tf.sparse.SparseTensor`. Each element of this dataset must have the same
      rank as `row_shape`, and must have size less than or equal to `row_shape`
      in each dimension.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  # Simply forward to the (non-deprecated) Dataset method.
  def _apply_fn(dataset):
    return dataset.sparse_batch(batch_size, row_shape=row_shape)

  return _apply_fn
143
+
144
+
145
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()`")
@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
def map_and_batch_with_legacy_function(map_func,
                                       batch_size,
                                       num_parallel_batches=None,
                                       drop_remainder=False,
                                       num_parallel_calls=None):
  """Fused implementation of `map` and `batch`.

  NOTE: This is an escape hatch for existing uses of `map_and_batch` that do not
  work with V2 functions. New uses are strongly discouraged and existing uses
  should migrate to `map_and_batch` as this method will not be removed in V2.

  Args:
    map_func: A function mapping a nested structure of tensors to another
      nested structure of tensors.
    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
      consecutive elements of this dataset to combine in a single batch.
    num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
      representing the number of batches to create in parallel. On one hand,
      higher values can help mitigate the effect of stragglers. On the other
      hand, higher values can increase contention if CPU is scarce.
    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
      whether the last batch should be dropped in case its size is smaller than
      desired; the default behavior is not to drop the smaller batch.
    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
      representing the number of elements to process in parallel. If not
      specified, `batch_size * num_parallel_batches` elements will be processed
      in parallel. If the value `tf.data.AUTOTUNE` is used, then
      the number of parallel calls is set dynamically based on available CPU.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.

  Raises:
    ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
      specified.
  """

  # Exactly one of the two parallelism knobs may be supplied; derive
  # `num_parallel_calls` from whichever one was given.
  if num_parallel_batches is None and num_parallel_calls is None:
    num_parallel_calls = batch_size
  elif num_parallel_batches is not None and num_parallel_calls is None:
    num_parallel_calls = batch_size * num_parallel_batches
  elif num_parallel_batches is not None and num_parallel_calls is not None:
    raise ValueError(
        "`map_and_batch_with_legacy_function` allows only one of "
        "`num_parallel_batches` and "
        "`num_parallel_calls` to be set, but "
        f"`num_parallel_batches` was set to {num_parallel_batches} "
        f"and `num_parallel_calls` was set to {num_parallel_calls}.")

  def _apply_fn(dataset):
    return _MapAndBatchDataset(dataset, map_func, batch_size,
                               num_parallel_calls, drop_remainder,
                               use_legacy_function=True)

  return _apply_fn
203
+
204
+
205
@deprecation.deprecated(
    None,
    "Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
    "`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
    "optimizations will take care of using the fused implementation.")
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
                  batch_size,
                  num_parallel_batches=None,
                  drop_remainder=False,
                  num_parallel_calls=None):
  """Fused implementation of `map` and `batch`.

  Maps `map_func` across `batch_size` consecutive elements of this dataset
  and then combines them into a batch. Functionally, it is equivalent to `map`
  followed by `batch`. This API is temporary and deprecated since input pipeline
  optimization now fuses consecutive `map` and `batch` operations automatically.

  Args:
    map_func: A function mapping a nested structure of tensors to another
      nested structure of tensors.
    batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
      consecutive elements of this dataset to combine in a single batch.
    num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
      representing the number of batches to create in parallel. On one hand,
      higher values can help mitigate the effect of stragglers. On the other
      hand, higher values can increase contention if CPU is scarce.
    drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
      whether the last batch should be dropped in case its size is smaller than
      desired; the default behavior is not to drop the smaller batch.
    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
      representing the number of elements to process in parallel. If not
      specified, `batch_size * num_parallel_batches` elements will be processed
      in parallel. If the value `tf.data.AUTOTUNE` is used, then
      the number of parallel calls is set dynamically based on available CPU.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.

  Raises:
    ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
      specified.
  """

  # Exactly one of the two parallelism knobs may be supplied; derive
  # `num_parallel_calls` from whichever one was given.
  if num_parallel_batches is None and num_parallel_calls is None:
    num_parallel_calls = batch_size
  elif num_parallel_batches is not None and num_parallel_calls is None:
    num_parallel_calls = batch_size * num_parallel_batches
  elif num_parallel_batches is not None and num_parallel_calls is not None:
    raise ValueError(
        "`map_and_batch` allows only one of `num_parallel_batches` and "
        "`num_parallel_calls` to be set, but "
        f"`num_parallel_batches` was set to {num_parallel_batches} "
        f"and `num_parallel_calls` was set to {num_parallel_calls}.")

  def _apply_fn(dataset):
    return _MapAndBatchDataset(dataset, map_func, batch_size,
                               num_parallel_calls, drop_remainder)

  return _apply_fn
266
+
267
+
268
@deprecation.deprecated(None, "Use `tf.data.Dataset.unbatch()`.")
@tf_export("data.experimental.unbatch")
def unbatch():
  """Splits elements of a dataset into multiple elements on the batch dimension.

  For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
  where `B` may vary for each input element, then for each element in the
  dataset, the unbatched dataset will contain `B` consecutive elements
  of shape `[a0, a1, ...]`.

  ```python
  # NOTE: The following example uses `{ ... }` to represent the contents
  # of a dataset.
  a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }

  a.unbatch() == {
      'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
  ```

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  # Simply forward to the (non-deprecated) Dataset method.
  return lambda dataset: dataset.unbatch()
296
+
297
+
298
class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that batches ragged dense elements into `tf.sparse.SparseTensor`s."""

  def __init__(self, input_dataset, batch_size, row_shape):
    """See `Dataset.dense_to_sparse_batch()` for more details."""
    output_types = dataset_ops.get_legacy_output_types(input_dataset)
    # The op only supports single-component (single-dtype) elements.
    if not isinstance(output_types, dtypes.DType):
      raise TypeError("`dense_to_sparse_batch` requires an input dataset whose "
                      "elements have a single component, but the given dataset "
                      "has the following component types: "
                      f"{output_types}.")
    self._input_dataset = input_dataset
    self._batch_size = batch_size
    self._row_shape = row_shape
    # The output is a SparseTensor with a leading (possibly partial) batch dim.
    self._element_spec = sparse_tensor.SparseTensorSpec(
        tensor_shape.TensorShape([None]).concatenate(row_shape), output_types)

    variant_tensor = ged_ops.dense_to_sparse_batch_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        batch_size,
        row_shape=convert.partial_shape_to_tensor(row_shape),
        **self._flat_structure)
    super().__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    return self._element_spec
327
+
328
+
329
class _MapAndBatchDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that maps a function over a batch of elements."""

  def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
               drop_remainder, use_legacy_function=False):
    self._input_dataset = input_dataset

    self._map_func = structured_function.StructuredFunctionWrapper(
        map_func,
        "tf.data.experimental.map_and_batch()",
        dataset=input_dataset,
        use_legacy_function=use_legacy_function)
    self._batch_size_t = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int64, name="batch_size")
    self._num_parallel_calls_t = ops.convert_to_tensor(
        num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
    self._drop_remainder_t = ops.convert_to_tensor(
        drop_remainder, dtype=dtypes.bool, name="drop_remainder")

    # pylint: disable=protected-access
    # The static value of `drop_remainder` may be `True`, `False`, or `None`
    # (unknown statically). Only a static `True` lets us pin the leading batch
    # dimension of the element spec; otherwise it stays unknown (`None`).
    if tensor_util.constant_value(self._drop_remainder_t):
      static_batch_size = tensor_util.constant_value(self._batch_size_t)
    else:
      static_batch_size = None
    self._element_spec = nest.map_structure(
        lambda component_spec: component_spec._batch(static_batch_size),
        self._map_func.output_structure)
    variant_tensor = ged_ops.map_and_batch_dataset(
        self._input_dataset._variant_tensor,
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        batch_size=self._batch_size_t,
        num_parallel_calls=self._num_parallel_calls_t,
        drop_remainder=self._drop_remainder_t,
        preserve_cardinality=True,
        **self._flat_structure)
    # pylint: enable=protected-access
    super().__init__(input_dataset, variant_tensor)

  def _functions(self):
    return [self._map_func]

  @property
  def element_spec(self):
    return self._element_spec
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/cardinality.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Cardinality analysis of `Dataset` objects."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.framework import dtypes
18
+ from tensorflow.python.framework import ops
19
+ from tensorflow.python.ops import gen_dataset_ops
20
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
21
+ from tensorflow.python.util.tf_export import tf_export
22
+
23
+
24
# Sentinel cardinality values: the element count is infinite, or cannot be
# determined statically.
INFINITE = -1
UNKNOWN = -2
# Re-export the sentinels under their public `tf.data.experimental` names.
tf_export("data.experimental.INFINITE_CARDINALITY").export_constant(
    __name__, "INFINITE")
tf_export("data.experimental.UNKNOWN_CARDINALITY").export_constant(
    __name__, "UNKNOWN")
30
+
31
+
32
# TODO(b/157691652): Deprecate this method after migrating users to the new API.
@tf_export("data.experimental.cardinality")
def cardinality(dataset):
  """Returns the cardinality of `dataset`, if known.

  The operation returns the cardinality of `dataset`. The operation may return
  `tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite
  number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the
  analysis fails to determine the number of elements in `dataset` (e.g. when the
  dataset source is a file).

  >>> dataset = tf.data.Dataset.range(42)
  >>> print(tf.data.experimental.cardinality(dataset).numpy())
  42
  >>> dataset = dataset.repeat()
  >>> cardinality = tf.data.experimental.cardinality(dataset)
  >>> print((cardinality == tf.data.experimental.INFINITE_CARDINALITY).numpy())
  True
  >>> dataset = dataset.filter(lambda x: True)
  >>> cardinality = tf.data.experimental.cardinality(dataset)
  >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy())
  True

  Args:
    dataset: A `tf.data.Dataset` for which to determine cardinality.

  Returns:
    A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If
    the cardinality is infinite or unknown, the operation returns the named
    constant `INFINITE_CARDINALITY` and `UNKNOWN_CARDINALITY` respectively.
  """
  # The analysis itself is implemented by the C++ kernel.
  # pylint: disable=protected-access
  return gen_dataset_ops.dataset_cardinality(dataset._variant_tensor)
65
+
66
+
67
@tf_export("data.experimental.assert_cardinality")
def assert_cardinality(expected_cardinality):
  """Asserts the cardinality of the input dataset.

  NOTE: The following assumes that "examples.tfrecord" contains 42 records.

  >>> dataset = tf.data.TFRecordDataset("examples.tfrecord")
  >>> cardinality = tf.data.experimental.cardinality(dataset)
  >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy())
  True
  >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42))
  >>> print(tf.data.experimental.cardinality(dataset).numpy())
  42

  Args:
    expected_cardinality: The expected cardinality of the input dataset.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.

  Raises:
    FailedPreconditionError: The assertion is checked at runtime (when iterating
      the dataset) and an error is raised if the actual and expected cardinality
      differ.
  """
  # Wrap the input dataset in the asserting dataset defined below.
  return lambda dataset: _AssertCardinalityDataset(dataset,
                                                   expected_cardinality)
97
+
98
+
99
class _AssertCardinalityDataset(dataset_ops.UnaryUnchangedStructureDataset):
  """A `Dataset` that asserts the cardinality of its input."""

  def __init__(self, input_dataset, expected_cardinality):
    self._input_dataset = input_dataset
    self._expected_cardinality = ops.convert_to_tensor(
        expected_cardinality, dtype=dtypes.int64, name="expected_cardinality")
    # The check itself happens at iteration time, inside the op.
    variant_tensor = ged_ops.assert_cardinality_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._expected_cardinality,
        **self._flat_structure)
    super().__init__(input_dataset, variant_tensor)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/compression_ops.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Ops for compressing and uncompressing dataset elements."""
16
+ from tensorflow.python.data.util import structure
17
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
18
+
19
+
20
def compress(element):
  """Compress a dataset element.

  Args:
    element: A nested structure of types supported by Tensorflow.

  Returns:
    A variant tensor representing the compressed element. This variant can be
    passed to `uncompress` to get back the original element.
  """
  # Flatten the (possibly nested) element into a list of tensors, which is
  # the form the compression kernel expects.
  spec = structure.type_spec_from_value(element)
  flat_tensors = structure.to_tensor_list(spec, element)
  return ged_ops.compress_element(flat_tensors)
33
+
34
+
35
def uncompress(element, output_spec):
  """Uncompress a compressed dataset element.

  Args:
    element: A scalar variant tensor to uncompress. The element should have
      been created by calling `compress`.
    output_spec: A nested structure of `tf.TypeSpec` representing the type(s)
      of the uncompressed element.

  Returns:
    The uncompressed element.
  """
  # The kernel needs the flat dtypes/shapes of the result up front; derive
  # both from the caller-provided spec.
  component_types = structure.get_flat_tensor_types(output_spec)
  component_shapes = structure.get_flat_tensor_shapes(output_spec)
  flat_tensors = ged_ops.uncompress_element(
      element, output_types=component_types, output_shapes=component_shapes)
  # Re-nest the flat tensor list back into the caller's structure.
  return structure.from_tensor_list(output_spec, flat_tensors)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/counter.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """The Counter Dataset."""
16
+ from tensorflow.python import tf2
17
+ from tensorflow.python.compat import v2_compat
18
+ from tensorflow.python.data.ops import dataset_ops
19
+ from tensorflow.python.framework import dtypes
20
+ from tensorflow.python.util import deprecation
21
+ from tensorflow.python.util.tf_export import tf_export
22
+
23
+
24
@tf_export("data.experimental.Counter", v1=[])
@deprecation.deprecated(None, "Use `tf.data.Dataset.counter(...)` instead.")
def CounterV2(start=0, step=1, dtype=dtypes.int64):
  """Creates a `Dataset` that counts from `start` in steps of size `step`.

  Unlike `tf.data.Dataset.range` which will stop at some ending number,
  `Counter` will produce elements indefinitely.

  >>> dataset = tf.data.experimental.Counter().take(5)
  >>> list(dataset.as_numpy_iterator())
  [0, 1, 2, 3, 4]
  >>> dataset.element_spec
  TensorSpec(shape=(), dtype=tf.int64, name=None)
  >>> dataset = tf.data.experimental.Counter(dtype=tf.int32)
  >>> dataset.element_spec
  TensorSpec(shape=(), dtype=tf.int32, name=None)
  >>> dataset = tf.data.experimental.Counter(start=2).take(5)
  >>> list(dataset.as_numpy_iterator())
  [2, 3, 4, 5, 6]
  >>> dataset = tf.data.experimental.Counter(start=2, step=5).take(5)
  >>> list(dataset.as_numpy_iterator())
  [2, 7, 12, 17, 22]
  >>> dataset = tf.data.experimental.Counter(start=10, step=-1).take(5)
  >>> list(dataset.as_numpy_iterator())
  [10, 9, 8, 7, 6]

  Args:
    start: (Optional.) The starting value for the counter. Defaults to 0.
    step: (Optional.) The step size for the counter. Defaults to 1.
    dtype: (Optional.) The data type for counter elements. Defaults to
      `tf.int64`.

  Returns:
    A `Dataset` of scalar `dtype` elements.
  """
  # Deprecated alias: forward directly to the core implementation.
  return dataset_ops.Dataset.counter(start=start, step=step, dtype=dtype)
60
+
61
+
62
@tf_export(v1=["data.experimental.Counter"])
@deprecation.deprecated(None, "Use `tf.data.Dataset.counter(...)` instead.")
def CounterV1(start=0, step=1, dtype=dtypes.int64):
  # Present the V2 counter through the TF1 `Dataset` interface.
  return dataset_ops.DatasetV1Adapter(CounterV2(start, step, dtype))
66
+
67
+
68
CounterV1.__doc__ = CounterV2.__doc__


def _tf2_callback():  # pylint: disable=invalid-name
  """Points `Counter` at the implementation matching the current TF mode."""
  global Counter
  Counter = CounterV2 if tf2.enabled() else CounterV1


# Initialize `Counter` for the current TF1/TF2 mode and keep it in sync if
# the mode is toggled later at runtime.
_tf2_callback()
v2_compat.register_data_v2_callback(_tf2_callback)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distribute.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Distribution Strategy-related dataset transformations."""
16
+
17
+ from tensorflow.python.data.ops import dataset_ops
18
+ from tensorflow.python.data.ops.options import ExternalStatePolicy
19
+ from tensorflow.python.data.util import nest
20
+ from tensorflow.python.framework import constant_op
21
+ from tensorflow.python.framework import dtypes
22
+ from tensorflow.python.framework import ops
23
+ from tensorflow.python.framework import tensor_shape
24
+ from tensorflow.python.framework import tensor_util
25
+ from tensorflow.python.ops import array_ops
26
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
27
+ from tensorflow.python.types import data as data_types
28
+ from tensorflow.python.util.tf_export import tf_export
29
+
30
# Sentinel constant exported as `tf.data.experimental.SHARD_HINT`.
# NOTE(review): presumably passed in place of a concrete shard index/count so
# the tf.data runtime can substitute the real value later — confirm against
# the `Dataset.shard` / tf.data service documentation.
SHARD_HINT = -1
tf_export("data.experimental.SHARD_HINT").export_constant(
    __name__, "SHARD_HINT")
33
+
34
+
35
class _AutoShardDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that shards the `Dataset` automatically.

  This dataset takes in an existing dataset and tries to automatically figure
  out how to shard the dataset in a multi-worker scenario using graph rewrites.

  If the AutoShardPolicy is set to FILE, it walks up the dataset graph until
  it finds a reader dataset, then inserts a ShardDataset op before that node
  so that each worker only sees some files.

  If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the
  end of the input pipeline, before any terminal PrefetchDataset if there is
  one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it
  is written to legacy RebatchDataset for correctness reasons, since
  RebatchDatasetV2 is incompatible with data sharding.

  If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding.
  If it cannot find a reader dataset, it falls back to doing data-based
  sharding.

  If the AutoShardPolicy is set to OFF, it does nothing.

  Attributes:
    num_workers: Total number of workers to shard this dataset across.
    index: The current worker index (out of the total number of workers) this
      dataset is for.
    num_replicas: The total number of replicas across all workers. This is used
      only when sharding by data (either DATA or AUTO) in order to rewrite
      RebatchDatasetV2 to RebatchDataset.

  Raises:
    NotFoundError: If we cannot find a suitable reader dataset to begin
      automatically sharding the dataset.
  """

  def __init__(self, input_dataset, num_workers, index, num_replicas=None):
    """Wraps `input_dataset` in an `auto_shard_dataset` op.

    Args:
      input_dataset: The `Dataset` to shard.
      num_workers: Total number of workers to shard across.
      index: This worker's index in `[0, num_workers)`.
      num_replicas: (Optional.) Total number of replicas across all workers;
        forwarded to the op for the DATA/AUTO rebatch rewrite described above.
    """
    self._input_dataset = input_dataset

    # Sharding does not change the per-element structure, so the spec is
    # inherited directly from the input.
    self._element_spec = input_dataset.element_spec
    variant_tensor = ged_ops.auto_shard_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        num_workers=num_workers,
        index=index,
        # The policy is carried on the input dataset's options, not passed by
        # the caller; the op expects it as a plain int.
        auto_shard_policy=int(
            input_dataset.options().experimental_distribute.auto_shard_policy),
        num_replicas=num_replicas,
        **self._flat_structure)
    super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    # Same structure as the (unsharded) input dataset.
    return self._element_spec
87
+
88
+
89
def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None):  # pylint: disable=invalid-name
  # TF1 variant: wrap the V2 auto-shard dataset in a V1 adapter.
  sharded = _AutoShardDataset(input_dataset, num_workers, index, num_replicas)
  return dataset_ops.DatasetV1Adapter(sharded)
92
+
93
+
94
class _LegacyRebatchDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that divides its input batches into `num_replicas` sub-batches.

  For each batch in the input dataset, _LegacyRebatchDataset will produce
  `num_replicas` smaller batches whose sizes add up to the original batch size.

  For example:

  ```python
  ds = tf.data.Dataset.range(8)
  ds = ds.batch(4)
  ds = _LegacyRebatchDataset(ds, num_replicas=3)
  for elem in ds:
    print(elem)
  >> [0, 1], [2, 3], [], [4, 5], [6, 7], []
  ```
  """

  def __init__(self, input_dataset, num_replicas):
    """Creates a _LegacyRebatchDataset.

    Args:
      input_dataset: `Dataset` to rebatch.
      num_replicas: A `tf.int64` scalar, representing the number of sub-batches
        to split each batch from `input_dataset` into.
    """

    def recalculate_batch_size(type_spec):
      """Recalculates the output_shape after dividing it by num_replicas."""
      # Returns the new static batch size, or None when it cannot be known
      # statically (non-TensorShape legacy shape, unknown rank, or a batch
      # dimension that is unknown / not divisible by num_replicas).
      output_shape = type_spec._to_legacy_output_shapes()  # pylint: disable=protected-access
      if not isinstance(output_shape, tensor_shape.TensorShape):
        return None

      # If the output shape is unknown, we set the batch dimension to unknown.
      if output_shape.rank is None:
        return None

      if len(output_shape) < 1:
        raise ValueError(
            "Invalid `input_dataset`. Expected a dataset whose elements "
            "have rank >= 1 but found a dataset whose elements are scalars. "
            "Fix the issue by adding the `batch` transformation to the "
            "dataset.")
      output_dims = [d.value for d in output_shape.dims]

      if output_dims[0] is not None and output_dims[0] % num_replicas == 0:
        return output_dims[0] // num_replicas

      # Set the batch dimension to unknown. If the global batch size does not
      # divide num_replicas evenly, the minibatches may have different sizes.
      return None

    def rebatch(type_spec):
      # Rewrite a component spec so its batch dimension reflects the
      # post-split size computed above (None for "unknown").
      # pylint: disable=protected-access
      batch_size = recalculate_batch_size(type_spec)
      return type_spec._unbatch()._batch(batch_size)
      # pylint: enable=protected-access

    self._element_spec = nest.map_structure(
        rebatch, dataset_ops.get_structure(input_dataset))

    # auto_shard rewrite assumes that there's normalize_to_dense before
    # rebatch_dataset.
    # LINT.IfChange
    input_dataset = dataset_ops.normalize_to_dense(input_dataset)
    variant_tensor = ged_ops.rebatch_dataset(
        input_dataset._variant_tensor,  # pylint: disable=protected-access
        num_replicas=num_replicas,
        **self._flat_structure)
    # LINT.ThenChange(//tensorflow/core/grappler/optimizers/data/auto_shard.cc)
    super(_LegacyRebatchDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    # The rebatched (per-replica) structure, not the input's.
    return self._element_spec
169
+
170
+
171
class _RemoteDataset(dataset_ops.DatasetSource):
  """Creates a dataset on a given `device` given a graph def."""

  def __init__(self, graph_def, device, element_spec):
    self._elem_spec = element_spec
    # Deserialize the dataset graph under the target device scope so the
    # resulting variant tensor is placed on that device.
    with ops.device(device):
      remote_variant = ged_ops.dataset_from_graph(graph_def)
    super().__init__(remote_variant)

  @property
  def element_spec(self):
    return self._elem_spec
183
+
184
+
185
def replicate(dataset, devices):
  """A transformation that replicates `dataset` onto a list of devices.

  Args:
    dataset: A `tf.data.Dataset` object.
    devices: A list of devices to replicate the dataset on.

  Returns:
    A dictionary mapping device name to a dataset on that device.
  """
  if not isinstance(dataset, data_types.DatasetV2):
    raise TypeError(
        f"Invalid `dataset`. Expected a `tf.data.Dataset` object but "
        f"got {type(dataset)}.")

  # pylint: disable=protected-access
  dataset_device = dataset._variant_tensor.device

  # Fast path: "replicating" onto exactly the dataset's own device is a no-op.
  if len(devices) == 1 and devices[0] == dataset_device:
    return {devices[0]: dataset}

  # Serialize the pipeline once, then rebuild it on each target device.
  with ops.colocate_with(dataset._variant_tensor):
    dataset = dataset._apply_debug_options()
    graph_def = dataset._as_serialized_graph(
        strip_device_assignment=True,
        external_state_policy=ExternalStatePolicy.WARN)
  return {
      device: _RemoteDataset(graph_def, device, dataset.element_spec)
      for device in devices
  }
217
+
218
+
219
def batch_sizes_for_worker(global_batch_size, num_workers,
                           num_replicas_per_worker, worker_index):
  """Determines how to rebatch a dataset for the given worker.

  Given the global batch size, number of workers, number of replicas per worker,
  and worker index, returns the correct batch sizes for rebatching a dataset
  on worker `worker_index` of `num_workers`, such that each global step (across
  all workers and replicas) will consume global_batch_size elements. The
  returned value should be passed as the `batch_sizes` input parameter to
  `tf.data.experimental.rebatch()`. The returned batch sizes meet the following
  constraints:

  Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker
  (A) for any worker, len(batch_sizes) = W * R
  (B) for any worker, sum(batch_sizes) == G
  (C) for any global step (i.e. R iterations on each worker), the sum of batches
  consumed by replicas across all workers is G.
  (D) any two batch sizes of any two replicas differs by at most one.

  For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two
  files which each contain 7 elements:

  ```python
  # WORKER 0
  batch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size,
                                         num_workers=2,
                                         num_replicas_per_worker=2,
                                         worker_index=0)
  print(batch_sizes_0)
  >> [2, 2, 2, 1]

  dataset_0 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
  dataset_0 = dataset_0.shard(num_shards, index=0)
  dataset_0 = dataset_0.batch(7)
  dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0))
  for elem in dataset_0:
    print(elem)
  >> [[A0, A1], [A2, A3], [A4, A5], [A6]]

  # WORKER 1
  batch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size,
                                         num_workers=2,
                                         num_replicas_per_worker=2,
                                         worker_index=1)
  print(batch_sizes_1)
  >> [2, 1, 2, 2]

  dataset_1 = tf.data.Dataset.from_tensor_slices(["file_a", "file_b"])
  dataset_1 = dataset_1.shard(num_shards, index=1)
  dataset_1 = dataset_1.batch(7)
  dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1))
  for elem in dataset_1:
    print(elem)
  >> [[B0, B1], [B2], [B3, B4], [B5, B6]]
  ```

  The above example will produce the following elements:

  Step 1:
    Worker 0 Replica 0: [A0, A1]
    Worker 0 Replica 1: [A2, A3]
    Worker 1 Replica 0: [B0, B1]
    Worker 1 Replica 1: [B2]
  Total batch size = 7

  Step 2:
    Worker 0 Replica 0: [A4, A5]
    Worker 0 Replica 1: [A6]
    Worker 1 Replica 0: [B3, B4]
    Worker 1 Replica 1: [B5, B6]
  Total batch size = 7

  Args:
    global_batch_size: A `tf.int64` scalar, representing the global batch size.
    num_workers: An integer representing the number of workers the dataset will
      be distributed across.
    num_replicas_per_worker: An integer representing the number of replicas per
      worker. All workers are assumed to have the same number of replicas.
    worker_index: An integer index of the worker to be rebatched.

  Returns:
    A `tf.int64` vector, representing the batch sizes to rebatch the dataset
    into.
  """
  # Constraint (A)
  num_subbatches = num_workers * num_replicas_per_worker

  # Rotation offset for this worker: worker k's schedule is worker 0's
  # schedule rotated left by k * R positions (yields constraint (C)).
  offset = worker_index * num_replicas_per_worker

  const_value = tensor_util.constant_value(global_batch_size)
  if const_value is not None:
    # Use the constant global batch size for further calculations
    global_batch_size = const_value

  # Let N = W * R. Constraint (B) and (D) jointly mean that the iterations
  # should have batch size either floor(B/N) or ceil(B/N). Namely, of the N
  # subbatches a batch is split into, B - N * floor(B/N) of them will have size
  # ceil(B/N), and the rest will have size floor(B/N).
  floor = global_batch_size // num_subbatches
  num_ceil = global_batch_size - (num_subbatches * floor)

  # For worker 0, we assign the first num_ceil subbatches to have size
  # ceil(B/N), and the remainder to have size floor(B/N). The other workers will
  # each be offset by R * worker_index in order to meet constraint (C).
  if const_value is not None:
    # If the global batch size is a known constant value, we return a constant
    # tensor directly instead of manipulating it with TF ops. This allows for
    # better downstream shape inference.
    worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil)
    return ops.convert_to_tensor(
        worker_0[offset:] + worker_0[:offset],
        dtype=dtypes.int64,
        name="batch_sizes")

  # Dynamic case: build worker 0's schedule with tensor ops, then rotate it
  # by `offset` via two slices and a concat.
  worker_0 = array_ops.ones(num_subbatches, dtype=dtypes.int64)
  worker_0 = floor * worker_0 + array_ops.concat([
      array_ops.ones(num_ceil, dtype=dtypes.int64),
      array_ops.zeros(num_subbatches - num_ceil, dtype=dtypes.int64)
  ],
                                                 axis=0)

  return array_ops.concat([worker_0[offset:], worker_0[:offset]], axis=0)
341
+
342
+
343
def compute_batch_size(dataset):
  """An operation that returns the batch size of the dataset.

  This op tries to infer the batch size statically by walking up the dataset
  tree from the final dataset node and returning the batch size of the first
  batching dataset (such as from .batch() and .padded_batch()) that it
  encounters. This differs from using the `element_spec` of a dataset in that it
  does not account for partial batches.

  This operation may fail if it encounters contradictory batch sizes (for
  example, if the dataset is created by zipping together two datasets with
  different batch sizes), if there are no explicit batching transformations, or
  if there are operations downstream from the batching transformation that may
  modify its batch size. In these cases, it returns a -1.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.int64` Tensor representing the batch size of the dataset sans partial
    batches. If this cannot be inferred statically, the value of this tensor
    will be -1.
  """

  def get_static_batch_dim(type_spec):
    # Specs without legacy shape info, unknown rank, or a non-TensorShape
    # legacy shape contribute no static batch dimension.
    try:
      output_shape = type_spec._to_legacy_output_shapes()  # pylint: disable=protected-access
    except NotImplementedError:
      return None
    if not isinstance(output_shape, tensor_shape.TensorShape):
      return None
    if output_shape.rank is None:
      return None
    return output_shape.dims[0].value

  batch_dims = [
      get_static_batch_dim(spec)
      for spec in nest.flatten(dataset_ops.get_structure(dataset))
  ]

  if all(dim is not None for dim in batch_dims):
    # All components have a statically known batch dimension: return it when
    # they agree, otherwise -1 to signal a contradiction.
    first = batch_dims[0]
    batch_dim = first if all(dim == first for dim in batch_dims) else -1
    return constant_op.constant(
        batch_dim, dtype=dtypes.int64, name="static_batch_size")

  # If any batch dimensions are unknown, use compute_batch_size op.
  return ged_ops.compute_batch_size(dataset._variant_tensor)  # pylint: disable=protected-access
397
+
398
+
399
# Keep the V1 adapter's documentation in sync with the V2 implementation.
_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/distributed_save_op.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Distributed saving of a dataset to disk."""
16
+
17
+ from tensorflow.core.protobuf import snapshot_pb2
18
+ from tensorflow.python.ops import gen_experimental_dataset_ops
19
+ # TODO(b/238903802): Use TypeSpec serialization methods directly.
20
+ from tensorflow.python.saved_model import nested_structure_coder
21
+
22
+
23
# TODO(b/250921378): Add example to docstring and export to TF API.
def distributed_save(dataset, path, dispatcher_address, compression="AUTO"):
  """Initiates the process of distributedly saving a dataset to disk.

  Args:
    dataset: The `tf.data.Dataset` to save.
    path: A string indicating the filepath of the directory to which to save
      `dataset`.
    dispatcher_address: A string indicating the address of the dispatcher for
      the tf.data service instance used to save `dataset`.
    compression: (Optional.) A string indicating whether and how to compress the
      `dataset` materialization. If `"AUTO"`, the tf.data runtime decides which
      algorithm to use. If `"GZIP"` or `"SNAPPY"`, that specific algorithm is
      used. If `None`, the `dataset` materialization is not compressed.

  Returns:
    An operation which when executed performs the distributed save.

  Raises:
    ValueError: If `dispatcher_address` is invalid.
  """
  if not isinstance(dispatcher_address, str):
    # Bug fix: the original message was missing the closing parenthesis after
    # the interpolated value.
    raise ValueError("`dispatcher_address` must be a string, but is a "
                     f"{type(dispatcher_address)} ({dispatcher_address}).")
  if not dispatcher_address:
    raise ValueError("`dispatcher_address` must not be empty")

  # The element spec is serialized into the snapshot metadata so readers can
  # reconstruct the element structure without the original pipeline.
  metadata = snapshot_pb2.DistributedSnapshotMetadata(
      element_spec=nested_structure_coder.encode_structure(
          dataset.element_spec).SerializeToString(),
      compression=compression,
  )

  return gen_experimental_dataset_ops.distributed_save(
      dataset._variant_tensor,  # pylint: disable=protected-access
      directory=path,
      address=dispatcher_address,
      metadata=metadata.SerializeToString(),
  )
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/enumerate_ops.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Enumerate dataset transformations."""
16
+ from tensorflow.python.util import deprecation
17
+ from tensorflow.python.util.tf_export import tf_export
18
+
19
+
20
@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.")
@tf_export("data.experimental.enumerate_dataset")
def enumerate_dataset(start=0):
  """A transformation that enumerates the elements of a dataset.

  It is similar to python's `enumerate`.
  For example:

  ```python
  # NOTE: The following examples use `{ ... }` to represent the
  # contents of a dataset.
  a = { 1, 2, 3 }
  b = { (7, 8), (9, 10) }

  # The nested structure of the `datasets` argument determines the
  # structure of elements in the resulting dataset.
  a.apply(tf.data.experimental.enumerate_dataset(start=5))
  => { (5, 1), (6, 2), (7, 3) }
  b.apply(tf.data.experimental.enumerate_dataset())
  => { (0, (7, 8)), (1, (9, 10)) }
  ```

  Args:
    start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
      enumeration.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # Deprecated alias: the core `Dataset.enumerate` method provides the
    # actual implementation.
    return dataset.enumerate(start)

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/from_list.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python API for creating a dataset from a list."""
16
+
17
+ import itertools
18
+
19
+ from tensorflow.python.data.ops import dataset_ops
20
+ from tensorflow.python.data.util import nest
21
+ from tensorflow.python.data.util import structure
22
+ from tensorflow.python.ops import gen_experimental_dataset_ops
23
+ from tensorflow.python.util.tf_export import tf_export
24
+
25
+
26
class _ListDataset(dataset_ops.DatasetSource):
  """A `Dataset` of elements from a list."""

  def __init__(self, elements, name=None):
    """Creates a `_ListDataset`.

    Args:
      elements: A non-empty list of elements whose components share the same
        nested structure (component shapes/dtypes may differ; a common
        supertype is inferred below).
      name: (Optional.) A name for the tf.data operation.

    Raises:
      ValueError: If `elements` is empty or not a list, or if the elements do
        not share a common nested structure.
    """
    if not elements:
      raise ValueError("Invalid `elements`. `elements` should not be empty.")
    if not isinstance(elements, list):
      raise ValueError("Invalid `elements`. `elements` must be a list.")

    elements = [structure.normalize_element(element) for element in elements]
    type_specs = [
        structure.type_spec_from_value(element) for element in elements
    ]

    # Check that elements have same nested structure.
    num_elements = len(elements)
    for i in range(1, num_elements):
      nest.assert_same_structure(type_specs[0], type_specs[i])

    # Infer elements' supershape: for each flat component, fold all elements'
    # specs into their most specific common supertype (e.g. ragged lengths
    # become an unknown dimension).
    flattened_type_specs = [nest.flatten(type_spec) for type_spec in type_specs]
    num_tensors_per_element = len(flattened_type_specs[0])
    flattened_structure = [None] * num_tensors_per_element
    for i in range(num_tensors_per_element):
      flattened_structure[i] = flattened_type_specs[0][i]
      for j in range(1, num_elements):
        flattened_structure[i] = flattened_structure[
            i].most_specific_common_supertype([flattened_type_specs[j][i]])

    # NOTE(review): when the elements are themselves datasets, their variant
    # tensors are stored directly instead of the flattened components —
    # presumably because nest.flatten would not decompose them usefully.
    if not isinstance(type_specs[0], dataset_ops.DatasetSpec):
      self._tensors = list(
          itertools.chain.from_iterable(
              [nest.flatten(element) for element in elements]))
    else:
      self._tensors = [x._variant_tensor for x in elements]
    self._structure = nest.pack_sequence_as(type_specs[0], flattened_structure)
    # `_name` must be set before the op is created: `_metadata` (read below)
    # depends on it.
    self._name = name
    variant_tensor = gen_experimental_dataset_ops.list_dataset(
        self._tensors,
        output_types=self._flat_types,
        output_shapes=self._flat_shapes,
        metadata=self._metadata.SerializeToString())
    super(_ListDataset, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    # The inferred common supertype structure, not any single element's spec.
    return self._structure
73
+
74
+
75
@tf_export("data.experimental.from_list")
def from_list(elements, name=None):
  """Creates a `Dataset` comprising the given list of elements.

  The returned dataset produces the items of the list one by one. For scalar
  elements the behavior is identical to `Dataset.from_tensor_slices`, but it
  differs when elements have structure. Consider the following example.

  >>> dataset = tf.data.experimental.from_list([(1, 'a'), (2, 'b'), (3, 'c')])
  >>> list(dataset.as_numpy_iterator())
  [(1, b'a'), (2, b'b'), (3, b'c')]

  To get the same output with `from_tensor_slices`, the data needs to be
  reorganized:

  >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3], ['a', 'b', 'c']))
  >>> list(dataset.as_numpy_iterator())
  [(1, b'a'), (2, b'b'), (3, b'c')]

  Unlike `from_tensor_slices`, `from_list` supports non-rectangular input:

  >>> dataset = tf.data.experimental.from_list([[1], [2, 3]])
  >>> list(dataset.as_numpy_iterator())
  [array([1], dtype=int32), array([2, 3], dtype=int32)]

  Achieving the same with `from_tensor_slices` requires the use of ragged
  tensors.

  `from_list` can be more performant than `from_tensor_slices` in some cases,
  since it avoids the need for data slicing each epoch. However, it can also be
  less performant, because data is stored as many small tensors rather than a
  few large tensors as in `from_tensor_slices`. The general guidance is to
  prefer `from_list` from a performance perspective when the number of elements
  is small (less than 1000).

  Args:
    elements: A list of elements whose components have the same nested
      structure.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    Dataset: A `Dataset` of the `elements`.
  """
  # Thin public wrapper; validation and supershape inference live in
  # `_ListDataset`.
  return _ListDataset(elements, name=name)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/interleave_ops.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Non-deterministic dataset transformations."""
16
+ from tensorflow.python import tf2
17
+ from tensorflow.python.compat import v2_compat
18
+ from tensorflow.python.data.ops import dataset_ops
19
+ from tensorflow.python.data.ops import readers
20
+ from tensorflow.python.util import deprecation
21
+ from tensorflow.python.util.tf_export import tf_export
22
+
23
+
24
@deprecation.deprecated(
    None,
    "Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
    "num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy "
    "execution is desired, use `tf.data.Options.deterministic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,
                        cycle_length,
                        block_length=1,
                        sloppy=False,
                        buffer_output_elements=None,
                        prefetch_input_elements=None):
  """A parallel version of the `Dataset.interleave()` transformation.

  Like `tf.data.Dataset.interleave`, this maps `map_func` across the input to
  produce nested datasets and interleaves their elements. Unlike it, elements
  are pulled from `cycle_length` nested datasets in parallel, which improves
  throughput, especially in the presence of stragglers. Setting `sloppy` can
  improve performance further by giving up the deterministic output order and
  letting the implementation skip nested datasets whose elements are not
  readily available.

  Example usage:

  ```python
  # Preprocess 4 files concurrently.
  filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
  dataset = filenames.apply(
      tf.data.experimental.parallel_interleave(
          lambda filename: tf.data.TFRecordDataset(filename),
          cycle_length=4))
  ```

  WARNING: If `sloppy` is `True`, the order of produced elements is not
  deterministic.

  Args:
    map_func: A function mapping a nested structure of tensors to a `Dataset`.
    cycle_length: The number of input `Dataset`s to interleave from in parallel.
    block_length: The number of consecutive elements to pull from an input
      `Dataset` before advancing to the next input `Dataset`.
    sloppy: A boolean controlling whether determinism should be traded for
      performance by allowing elements to be produced out of order. If `sloppy`
      is `None`, the `tf.data.Options.deterministic` dataset option (`True` by
      default) is used to decide whether to enforce a deterministic order.
    buffer_output_elements: The number of elements each iterator being
      interleaved should buffer (similar to the `.prefetch()` transformation
      for each interleaved iterator).
    prefetch_input_elements: The number of input elements to transform to
      iterators before they are needed for interleaving.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # All configuration is captured by closure; the actual work happens in
    # the ParallelInterleaveDataset op wrapper.
    return readers.ParallelInterleaveDataset(
        dataset,
        map_func,
        cycle_length,
        block_length,
        sloppy,
        buffer_output_elements,
        prefetch_input_elements)

  return _apply_fn
88
+
89
+
90
@deprecation.deprecated(None,
                        "Use `tf.data.Dataset.sample_from_datasets(...)`.")
@tf_export("data.experimental.sample_from_datasets", v1=[])
def sample_from_datasets_v2(datasets,
                            weights=None,
                            seed=None,
                            stop_on_empty_dataset=False):
  """Samples elements at random from the datasets in `datasets`.

  The resulting dataset interleaves elements of `datasets`, picking from
  dataset `i` with probability `weights[i]`. Sampling is done without
  replacement. For example, given 2 datasets:

  ```python
  dataset1 = tf.data.Dataset.range(0, 3)
  dataset2 = tf.data.Dataset.range(100, 103)
  ```

  and uniform sampling weights:

  ```python
  sample_dataset = tf.data.Dataset.sample_from_datasets(
      [dataset1, dataset2], weights=[0.5, 0.5])
  ```

  one possible outcome is:

  ```
  print(list(sample_dataset.as_numpy_iterator()))
  # [100, 0, 1, 101, 2, 102]
  ```

  Args:
    datasets: A non-empty list of `tf.data.Dataset` objects with compatible
      structure.
    weights: (Optional.) A list or Tensor of `len(datasets)` floating-point
      values where `weights[i]` represents the probability to sample from
      `datasets[i]`, or a `tf.data.Dataset` object where each element is such
      a list. Defaults to a uniform distribution across `datasets`.
    seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
      seed that will be used to create the distribution. See
      `tf.random.set_seed` for behavior.
    stop_on_empty_dataset: If `True`, sampling stops when it encounters an
      empty dataset. If `False`, empty datasets are skipped, which means the
      sampling distribution can silently drift away from `weights` as inputs
      run out of elements; setting it to `True` is recommended. Defaults to
      `False` for backward compatibility.

  Returns:
    A dataset that interleaves elements from `datasets` at random, according
    to `weights` if provided, otherwise with uniform probability.

  Raises:
    TypeError: If the `datasets` or `weights` arguments have the wrong type.
    ValueError:
      - If `datasets` is empty, or
      - If `weights` is specified and does not match the length of `datasets`.
  """
  # This deprecated endpoint simply forwards to the core implementation.
  return dataset_ops.Dataset.sample_from_datasets(
      datasets=datasets,
      weights=weights,
      seed=seed,
      stop_on_empty_dataset=stop_on_empty_dataset)
154
+
155
+
156
@deprecation.deprecated(None,
                        "Use `tf.data.Dataset.sample_from_datasets(...)`.")
@tf_export(v1=["data.experimental.sample_from_datasets"])
def sample_from_datasets_v1(datasets,
                            weights=None,
                            seed=None,
                            stop_on_empty_dataset=False):
  # TF1 endpoint: build the v2 dataset, then wrap it so it exposes the
  # DatasetV1 interface expected by TF1 code.
  sampled = sample_from_datasets_v2(datasets, weights, seed,
                                    stop_on_empty_dataset)
  return dataset_ops.DatasetV1Adapter(sampled)


sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__
168
+
169
+
170
@deprecation.deprecated(
    None, "Use `tf.data.Dataset.choose_from_datasets(...)` instead. Note that, "
    "unlike the experimental endpoint, the non-experimental endpoint "
    "sets `stop_on_empty_dataset=True` by default. You should set this "
    "argument explicitly in case you would like to match the behavior of the "
    "experimental endpoint.")
@tf_export("data.experimental.choose_from_datasets", v1=[])
def choose_from_datasets_v2(datasets,
                            choice_dataset,
                            stop_on_empty_dataset=False):
  """Creates a dataset that deterministically chooses elements from `datasets`.

  Each element of `choice_dataset` names the index of the dataset to pull the
  next element from. For example, given:

  ```python
  datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
              tf.data.Dataset.from_tensors("bar").repeat(),
              tf.data.Dataset.from_tensors("baz").repeat()]

  # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
  choice_dataset = tf.data.Dataset.range(3).repeat(3)

  result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
  ```

  the elements of `result` will be:

  ```
  "foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
  ```

  Args:
    datasets: A non-empty list of `tf.data.Dataset` objects with compatible
      structure.
    choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
      `0` and `len(datasets) - 1`.
    stop_on_empty_dataset: If `True`, selection stops when it encounters an
      empty dataset. If `False`, empty datasets are skipped, so the selected
      elements may silently diverge from what `choice_dataset` requests as
      inputs run out of elements; setting it to `True` is recommended.
      Defaults to `False` for backward compatibility.

  Returns:
    A dataset that interleaves elements from `datasets` according to the
    values of `choice_dataset`.

  Raises:
    TypeError: If `datasets` or `choice_dataset` has the wrong type.
    ValueError: If `datasets` is empty.
  """
  # This deprecated endpoint simply forwards to the core implementation.
  return dataset_ops.Dataset.choose_from_datasets(
      datasets=datasets,
      choice_dataset=choice_dataset,
      stop_on_empty_dataset=stop_on_empty_dataset)
225
+
226
+
227
@deprecation.deprecated(
    None, "Use `tf.data.Dataset.choose_from_datasets(...)` instead. Note that, "
    "unlike the experimental endpoint, the non-experimental endpoint "
    "sets `stop_on_empty_dataset=True` by default. You should set this "
    "argument explicitly in case you would like to match the behavior of the "
    "experimental endpoint.")
@tf_export(v1=["data.experimental.choose_from_datasets"])
def choose_from_datasets_v1(datasets,
                            choice_dataset,
                            stop_on_empty_dataset=False):
  # TF1 endpoint: build the v2 dataset, then wrap it so it exposes the
  # DatasetV1 interface expected by TF1 code.
  chosen = choose_from_datasets_v2(datasets, choice_dataset,
                                   stop_on_empty_dataset)
  return dataset_ops.DatasetV1Adapter(chosen)


choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__
242
+
243
def _tf2_callback():
  """(Re)binds the module-level aliases to the TF1 or TF2 implementations.

  Registered with `v2_compat` so the aliases track runtime toggles such as
  `tf.compat.v1.enable_v2_behavior()` / `disable_v2_behavior()`.
  """
  global choose_from_datasets, sample_from_datasets
  if tf2.enabled():
    choose_from_datasets = choose_from_datasets_v2
    sample_from_datasets = sample_from_datasets_v2
  else:
    choose_from_datasets = choose_from_datasets_v1
    sample_from_datasets = sample_from_datasets_v1


# Bind the aliases once at import time (previously this selection logic was
# duplicated verbatim at module level), then re-run it on every v2-behavior
# toggle via the registered callback.
_tf2_callback()
v2_compat.register_data_v2_callback(_tf2_callback)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/io.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python API for save and loading a dataset."""
16
+
17
+ from tensorflow.python.data.ops import dataset_ops
18
+ from tensorflow.python.util import deprecation
19
+ from tensorflow.python.util.tf_export import tf_export
20
+
21
# Compression scheme names accepted by the save/load entry points.
COMPRESSION_GZIP = "GZIP"
# NOTE(review): this constant maps to "NONE", not "SNAPPY" -- preserved as-is
# to match existing behavior, but confirm against the save/load kernels
# before relying on it.
COMPRESSION_SNAPPY = "NONE"
# Filename used for the serialized element-spec metadata of a saved dataset.
DATASET_SPEC_FILENAME = "dataset_spec.pb"
24
+
25
+
26
@tf_export("data.experimental.save", v1=[])
@deprecation.deprecated(None, "Use `tf.data.Dataset.save(...)` instead.")
def save(dataset,
         path,
         compression=None,
         shard_func=None,
         checkpoint_args=None):
  """Saves the content of the given dataset.

  Example usage:

  >>> import tempfile
  >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
  >>> # Save a dataset
  >>> dataset = tf.data.Dataset.range(2)
  >>> tf.data.experimental.save(dataset, path)
  >>> new_dataset = tf.data.experimental.load(path)
  >>> for elem in new_dataset:
  ...   print(elem)
  tf.Tensor(0, shape=(), dtype=int64)
  tf.Tensor(1, shape=(), dtype=int64)

  The saved dataset is saved in multiple file "shards". By default, the dataset
  output is divided to shards in a round-robin fashion but custom sharding can
  be specified via the `shard_func` function. For example, you can save the
  dataset to using a single shard as follows:

  ```python
  dataset = make_dataset()
  def custom_shard_func(element):
    return np.int64(0)
  tf.data.experimental.save(
      dataset, path="/path/to/data", shard_func=custom_shard_func)
  ```

  To enable checkpointing, pass in `checkpoint_args` to the `save` method
  as follows:

  ```python
  dataset = tf.data.Dataset.range(100)
  save_dir = "..."
  checkpoint_prefix = "..."
  step_counter = tf.Variable(0, trainable=False)
  checkpoint_args = {
    "checkpoint_interval": 50,
    "step_counter": step_counter,
    "directory": checkpoint_prefix,
    "max_to_keep": 20,
  }
  dataset.save(save_dir, checkpoint_args=checkpoint_args)
  ```

  NOTE: The directory layout and file format used for saving the dataset is
  considered an implementation detail and may change. For this reason, datasets
  saved through `tf.data.experimental.save` should only be consumed through
  `tf.data.experimental.load`, which is guaranteed to be backwards compatible.

  Args:
    dataset: The dataset to save.
    path: Required. A directory to use for saving the dataset.
    compression: Optional. The algorithm to use to compress data when writing
      it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
    shard_func: Optional. A function to control the mapping of dataset elements
      to file shards. The function is expected to map elements of the input
      dataset to int64 shard IDs. If present, the function will be traced and
      executed as graph computation.
    checkpoint_args: Optional args for checkpointing which will be passed into
      the `tf.train.CheckpointManager`. If `checkpoint_args` are not specified,
      then checkpointing will not be performed. The `save()` implementation
      creates a `tf.train.Checkpoint` object internally, so users should not
      set the `checkpoint` argument in `checkpoint_args`.

  Returns:
    An operation which when executed performs the save. When writing
    checkpoints, returns None. The return value is useful in unit tests.

  Raises:
    ValueError if `checkpoint` is passed into `checkpoint_args`.
  """
  # Deprecated endpoint: delegate to the instance method on `Dataset`.
  # (The original docstring's examples erroneously passed `dataset` twice /
  # omitted it; they are fixed above. The runtime behavior is unchanged.)
  return dataset.save(path, compression, shard_func, checkpoint_args)
106
+
107
+
108
@tf_export("data.experimental.load", v1=[])
@deprecation.deprecated(None, "Use `tf.data.Dataset.load(...)` instead.")
def load(path, element_spec=None, compression=None, reader_func=None):
  """Loads a previously saved dataset.

  Example usage:

  >>> import tempfile
  >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
  >>> # Save a dataset
  >>> dataset = tf.data.Dataset.range(2)
  >>> tf.data.experimental.save(dataset, path)
  >>> new_dataset = tf.data.experimental.load(path)
  >>> for elem in new_dataset:
  ...   print(elem)
  tf.Tensor(0, shape=(), dtype=int64)
  tf.Tensor(1, shape=(), dtype=int64)


  When the dataset was saved with the default sharding, loading it preserves
  the original element order.

  Use `reader_func` to customize the order in which the individual shards are
  read. It receives a dataset of datasets (one per shard) and must return a
  dataset of elements. For example, to shuffle the shard order while loading:

  ```python
  def custom_reader_func(datasets):
    datasets = datasets.shuffle(NUM_SHARDS)
    return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)

  dataset = tf.data.experimental.load(
      path="/path/to/data", ..., reader_func=custom_reader_func)
  ```

  Args:
    path: Required. A path pointing to a previously saved dataset.
    element_spec: Optional. A nested structure of `tf.TypeSpec` objects
      matching the structure of an element of the saved dataset and specifying
      the type of individual element components. If not provided, the nested
      structure of `tf.TypeSpec` saved with the saved dataset is used. Note
      that this argument is required in graph mode.
    compression: Optional. The algorithm to use to decompress the data when
      reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
    reader_func: Optional. A function to control how to read data from shards.
      If present, the function will be traced and executed as graph
      computation.

  Returns:
    A `tf.data.Dataset` instance.

  Raises:
    FileNotFoundError: If `element_spec` is not specified and the saved nested
      structure of `tf.TypeSpec` can not be located with the saved dataset.
    ValueError: If `element_spec` is not specified and the method is executed
      in graph mode.
  """
  # Deprecated endpoint: delegate to the classmethod on `Dataset`.
  return dataset_ops.Dataset.load(path, element_spec, compression, reader_func)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/iterator_ops.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Iterator ops."""
16
+
17
+ from tensorflow.python.data.ops import iterator_ops
18
+ from tensorflow.python.data.ops import options as options_lib
19
+ from tensorflow.python.util import deprecation
20
+ from tensorflow.python.util.tf_export import tf_export
21
+
22
+
23
def _convert_external_state_policy_to_enum(external_state_policy):
  """Maps a policy string (or enum) to an `options_lib.ExternalStatePolicy`.

  Accepts either an `ExternalStatePolicy` enum value (returned unchanged) or
  one of the strings 'warn', 'ignore', 'fail'.
  """
  if isinstance(external_state_policy, options_lib.ExternalStatePolicy):
    return external_state_policy
  # Equality comparison (rather than a dict lookup) so that arbitrary,
  # including unhashable, inputs fall through to the ValueError below.
  for name, policy in (
      ("warn", options_lib.ExternalStatePolicy.WARN),
      ("ignore", options_lib.ExternalStatePolicy.IGNORE),
      ("fail", options_lib.ExternalStatePolicy.FAIL),
  ):
    if external_state_policy == name:
      return policy
  raise ValueError(
      f"Invalid `ExternalStatePolicy.` Supported values include 'warn', "
      f"'ignore', and 'fail.' Received {external_state_policy}."
  )
36
+
37
+
38
@tf_export("data.experimental.make_saveable_from_iterator")
@deprecation.deprecated(
    None, "`make_saveable_from_iterator` is intended for use in TF1 with "
    "`tf.compat.v1.Saver`. In TF2, use `tf.train.Checkpoint` instead.")
def make_saveable_from_iterator(iterator, external_state_policy=None):
  """Returns a SaveableObject for saving/restoring iterator state using Saver.

  Args:
    iterator: Iterator.
    external_state_policy: A string that identifies how to handle input
      pipelines that depend on external state. Possible values are
      'ignore': The external state is silently ignored.
      'warn': The external state is ignored, logging a warning.
      'fail': The operation fails upon encountering external state.
      By default we set it to 'fail'.

  Returns:
    A SaveableObject for saving/restoring iterator state using Saver.

  Raises:
    ValueError: If iterator does not support checkpointing.
    ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or
      'fail'.

  For example:

  ```python
  with tf.Graph().as_default():
    ds = tf.data.Dataset.range(10)
    iterator = ds.make_initializable_iterator()
    # Build the iterator SaveableObject.
    saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)
    # Add the SaveableObject to the SAVEABLE_OBJECTS collection so
    # it can be automatically saved using Saver.
    tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
    saver = tf.compat.v1.train.Saver()

    while continue_training:
      ... Perform training ...
      if should_save_checkpoint:
        saver.save()
  ```

  Note: When restoring the iterator, the existing iterator state is completely
  discarded. This means that any changes you may have made to the Dataset
  graph will be discarded as well! This includes the new Dataset graph
  that you may have built during validation. So, while running validation,
  make sure to run the initializer for the validation input pipeline after
  restoring the checkpoint.

  Note: Not all iterators support checkpointing yet. Attempting to save the
  state of an unsupported iterator will throw an error.
  """
  # `None` means "use the default", which is the strictest policy.
  policy = "fail" if external_state_policy is None else external_state_policy
  policy_enum = _convert_external_state_policy_to_enum(policy)
  # pylint: disable=protected-access
  resource = iterator._iterator_resource
  return iterator_ops._IteratorSaveable(
      resource, resource.name, external_state_policy=policy_enum)
  # pylint: enable=protected-access
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/pad_to_cardinality.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """The implementation of `tf.data.experimental.pad_to_cardinality`."""
16
+
17
+ from collections.abc import Mapping
18
+
19
+ from tensorflow.python.data.ops import dataset_ops
20
+ from tensorflow.python.eager import context
21
+ from tensorflow.python.ops import array_ops
22
+ from tensorflow.python.util import nest
23
+ from tensorflow.python.util.tf_export import tf_export
24
+
25
+
26
@tf_export("data.experimental.pad_to_cardinality")
def pad_to_cardinality(cardinality, mask_key="valid"):
  """Pads a dataset with fake elements to reach the desired cardinality.

  The dataset to pad must have a known and finite cardinality and contain
  dictionary elements. The `mask_key` will be added to differentiate between
  real and padding elements -- real elements will have a `<mask_key>=True`
  entry while padding elements will have a `<mask_key>=False` entry.

  Example usage:

  >>> ds = tf.data.Dataset.from_tensor_slices({'a': [1, 2]})
  >>> ds = ds.apply(tf.data.experimental.pad_to_cardinality(3))
  >>> list(ds.as_numpy_iterator())
  [{'a': 1, 'valid': True}, {'a': 2, 'valid': True}, {'a': 0, 'valid': False}]

  This can be useful, e.g. during eval, when partial batches are undesirable
  but it is also important not to drop any data.

  ```
  ds = ...
  # Round up to the next full batch.
  target_cardinality = -(-ds.cardinality() // batch_size) * batch_size
  ds = ds.apply(tf.data.experimental.pad_to_cardinality(target_cardinality))
  # Set `drop_remainder` so that batch shape will be known statically. No data
  # will actually be dropped since the batch size divides the cardinality.
  ds = ds.batch(batch_size, drop_remainder=True)
  ```

  Args:
    cardinality: The cardinality to pad the dataset to.
    mask_key: The key to use for identifying real vs padding elements.

  Returns:
    A dataset transformation that can be applied via `Dataset.apply()`.
  """

  def make_filler_dataset(ds):
    """Builds a dataset of `padding` all-zeros elements marked invalid."""
    padding = cardinality - ds.cardinality()

    filler_element = nest.map_structure(
        lambda spec: array_ops.zeros(spec.shape, spec.dtype), ds.element_spec
    )
    filler_element[mask_key] = False
    filler_dataset = dataset_ops.Dataset.from_tensors(filler_element)
    filler_dataset = filler_dataset.repeat(padding)
    return filler_dataset

  def apply_valid_mask(x):
    # Real elements carry a truthy mask entry.
    x[mask_key] = True
    return x

  def _apply_fn(dataset):
    # The cardinality tensor is unknown during tracing, so we only check it
    # in eager mode.
    if context.executing_eagerly():
      # NOTE: the error messages previously misspelled "cardinality" and
      # claimed a strict "less than" requirement; equality is allowed.
      if dataset.cardinality() < 0:
        raise ValueError(
            "The dataset passed into `pad_to_cardinality` must "
            "have a known cardinality, but has cardinality "
            f"{dataset.cardinality()}"
        )
      if dataset.cardinality() > cardinality:
        raise ValueError(
            "The dataset passed into `pad_to_cardinality` must "
            "have a cardinality less than or equal to the target cardinality "
            f"({cardinality}), but has cardinality "
            f"{dataset.cardinality()}"
        )
      if not isinstance(dataset.element_spec, Mapping):
        raise ValueError(
            "`pad_to_cardinality` requires its input dataset to "
            "be a dictionary."
        )
    filler = make_filler_dataset(dataset)
    dataset = dataset.map(apply_valid_mask)
    dataset = dataset.concatenate(filler)
    return dataset

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/parsing_ops.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Experimental `dataset` API for parsing example."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.data.util import structure
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import sparse_tensor
20
+ from tensorflow.python.framework import tensor_spec
21
+ from tensorflow.python.ops import gen_experimental_dataset_ops
22
+ from tensorflow.python.ops import parsing_ops
23
+ from tensorflow.python.ops.ragged import ragged_tensor
24
+ from tensorflow.python.util import deprecation
25
+ from tensorflow.python.util.tf_export import tf_export
26
+
27
+
28
class _ParseExampleDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that parses `example` dataset into a `dict` dataset."""

  def __init__(self, input_dataset, features, num_parallel_calls,
               deterministic):
    self._input_dataset = input_dataset
    # The op consumes batches of serialized Example protos, i.e. string
    # vectors; reject anything else up front.
    if not structure.are_compatible(
        input_dataset.element_spec,
        tensor_spec.TensorSpec([None], dtypes.string)):
      raise TypeError("Input dataset should be a dataset of vectors of "
                      f"strings. Instead it is `{input_dataset.element_spec}`.")
    self._num_parallel_calls = num_parallel_calls
    # Map the tri-state `deterministic` flag onto the op's string attribute.
    if deterministic is None:
      self._deterministic = "default"
    else:
      self._deterministic = "true" if deterministic else "false"

    # pylint: disable=protected-access
    self._features = parsing_ops._prepend_none_dimension(features)
    parse_params = parsing_ops._ParseOpParams.from_features(self._features, [
        parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
        parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature,
        parsing_ops.RaggedFeature
    ])
    # pylint: enable=protected-access
    self._sparse_keys = parse_params.sparse_keys
    self._sparse_types = parse_params.sparse_types
    self._ragged_keys = parse_params.ragged_keys
    self._ragged_value_types = parse_params.ragged_value_types
    self._ragged_split_types = parse_params.ragged_split_types
    self._dense_keys = parse_params.dense_keys
    self._dense_defaults = parse_params.dense_defaults_vec
    self._dense_shapes = parse_params.dense_shapes_as_proto
    self._dense_types = parse_params.dense_types
    outer_shape = dataset_ops.get_legacy_output_shapes(self._input_dataset)

    # Each parsed feature becomes one dict entry; its spec is the input
    # (batch) shape extended with the feature's own shape.
    self._element_spec = {}

    for key, value_type in zip(parse_params.sparse_keys,
                               parse_params.sparse_types):
      self._element_spec[key] = sparse_tensor.SparseTensorSpec(
          outer_shape.concatenate([None]), value_type)

    for key, value_type, dense_shape in zip(parse_params.dense_keys,
                                            parse_params.dense_types,
                                            parse_params.dense_shapes):
      self._element_spec[key] = tensor_spec.TensorSpec(
          outer_shape.concatenate(dense_shape), value_type)

    for key, value_type, splits_type in zip(parse_params.ragged_keys,
                                            parse_params.ragged_value_types,
                                            parse_params.ragged_split_types):
      self._element_spec[key] = ragged_tensor.RaggedTensorSpec(
          outer_shape.concatenate([None]), value_type, 1, splits_type)

    variant_tensor = gen_experimental_dataset_ops.parse_example_dataset_v2(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._num_parallel_calls,
        self._dense_defaults,
        self._sparse_keys,
        self._dense_keys,
        self._sparse_types,
        self._dense_shapes,
        deterministic=self._deterministic,
        ragged_keys=self._ragged_keys,
        ragged_value_types=self._ragged_value_types,
        ragged_split_types=self._ragged_split_types,
        **self._flat_structure)
    super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)

  @property
  def element_spec(self):
    # A dict mapping feature keys to TensorSpec/SparseTensorSpec/
    # RaggedTensorSpec, built in __init__.
    return self._element_spec
103
+
104
+
105
@tf_export("data.experimental.parse_example_dataset")
@deprecation.deprecated(
    None, "Use `tf.data.Dataset.map(tf.io.parse_example(...))` instead.")
def parse_example_dataset(features, num_parallel_calls=1, deterministic=None):
  """A transformation that parses `Example` protos into a `dict` of tensors.

  Parses a number of serialized `Example` protos given in `serialized`. We refer
  to `serialized` as a batch with `batch_size` many entries of individual
  `Example` protos.

  This op parses serialized examples into a dictionary mapping keys to `Tensor`,
  `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to
  `VarLenFeature`, `RaggedFeature`, `SparseFeature`, and `FixedLenFeature`
  objects. Each `VarLenFeature` and `SparseFeature` is mapped to a
  `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each
  `FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more
  details about feature dictionaries.

  Args:
    features: A `dict` mapping feature keys to `FixedLenFeature`,
      `VarLenFeature`, `RaggedFeature`, and `SparseFeature` values.
    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
      representing the number of parsing processes to call in parallel.
    deterministic: (Optional.) A boolean controlling whether determinism
      should be traded for performance by allowing elements to be produced out
      of order if some parsing calls complete faster than others. If
      `deterministic` is `None`, the
      `tf.data.Options.deterministic` dataset option (`True` by default) is used
      to decide whether to produce elements deterministically.

  Returns:
    A dataset transformation function, which can be passed to
    `tf.data.Dataset.apply`.

  Raises:
    ValueError: if features argument is None.
  """
  if features is None:
    raise ValueError("Argument `features` is required, but not specified.")

  def _apply_fn(dataset):
    """Function from `Dataset` to `Dataset` that applies the transformation."""
    out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls,
                                       deterministic)
    # SparseFeature and RaggedFeature parse into component tensors; a second
    # map reassembles those components into composite tensors.
    if any(
        isinstance(feature,
                   (parsing_ops.SparseFeature, parsing_ops.RaggedFeature))
        for feature in features.values()):
      # pylint: disable=protected-access
      # pylint: disable=g-long-lambda
      out_dataset = out_dataset.map(
          lambda x: parsing_ops._construct_tensors_for_composite_features(
              features, x),
          num_parallel_calls=num_parallel_calls)
    return out_dataset

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/prefetching_ops.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python wrapper for prefetching_ops."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.data.ops import iterator_ops
18
+ from tensorflow.python.data.ops import structured_function
19
+ from tensorflow.python.data.util import structure
20
+ from tensorflow.python.eager import def_function
21
+ from tensorflow.python.framework import device as framework_device
22
+ from tensorflow.python.framework import dtypes
23
+ from tensorflow.python.framework import ops
24
+ from tensorflow.python.framework import tensor_spec
25
+ from tensorflow.python.ops import array_ops
26
+ from tensorflow.python.ops import functional_ops
27
+ from tensorflow.python.ops import gen_dataset_ops
28
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
29
+ from tensorflow.python.ops import resource_variable_ops
30
+ from tensorflow.python.util.tf_export import tf_export
31
+
32
+
33
@tf_export("data.experimental.prefetch_to_device")
def prefetch_to_device(device, buffer_size=None):
  """A transformation that prefetches dataset values to the given `device`.

  NOTE: Although the transformation creates a `tf.data.Dataset`, the
  transformation must be the final `Dataset` in the input pipeline.

  For example,
  >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  >>> dataset = dataset.apply(tf.data.experimental.prefetch_to_device("/cpu:0"))
  >>> for element in dataset:
  ...   print(f'Tensor {element} is on device {element.device}')
  Tensor 1 is on device /job:localhost/replica:0/task:0/device:CPU:0
  Tensor 2 is on device /job:localhost/replica:0/task:0/device:CPU:0
  Tensor 3 is on device /job:localhost/replica:0/task:0/device:CPU:0

  Args:
    device: A string naming the device onto which elements will be prefetched.
    buffer_size: (Optional.) The number of elements to buffer on `device`.
      Defaults to an automatically chosen value.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # Copy the elements to `device` first, then prefetch there, so the
    # prefetch buffer lives on the target device.
    copied = dataset.apply(copy_to_device(target_device=device))
    return copied.prefetch(buffer_size)

  return _apply_fn
63
+
64
+
65
@tf_export("data.experimental.copy_to_device")
def copy_to_device(target_device, source_device="/cpu:0"):
  """A transformation that copies dataset elements to the given `target_device`.

  Args:
    target_device: The name of a device to which elements will be copied.
    source_device: The original device on which `input_dataset` will be placed.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # All of the copying machinery lives in _CopyToDeviceDataset; this
    # wrapper just binds the device arguments.
    copied = _CopyToDeviceDataset(
        dataset,
        target_device=target_device,
        source_device=source_device)
    return copied

  return _apply_fn
83
+
84
+
85
+ # TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
86
+ # all inputs to the Op are in host memory, thereby avoiding some unnecessary
87
+ # Sends and Recvs.
88
class _CopyToDeviceDataset(dataset_ops.UnaryUnchangedStructureDataset):
  """A `Dataset` that copies elements to another device."""

  def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
    """Constructs a _CopyToDeviceDataset.

    The copy is implemented as a GeneratorDataset placed on `target_device`
    whose init/next/finalize functions issue `remote_call`s back to
    `source_device`, where the real iterator over `input_dataset` lives.

    Args:
      input_dataset: `Dataset` to be copied
      target_device: The name of the device to which elements would be copied.
      source_device: Device where input_dataset would be placed.
    """
    self._input_dataset = input_dataset._apply_debug_options()  # pylint: disable=protected-access
    self._target_device = target_device
    spec = framework_device.DeviceSpec().from_string(self._target_device)
    # Used by make_one_shot_iterator below; one-shot iterators are not
    # supported when the target is a GPU.
    self._is_gpu_target = (spec.device_type == "GPU")
    self._source_device_string = source_device
    self._source_device = ops.convert_to_tensor(source_device)

    # Wrap the variant so it can be safely captured across the function
    # boundary below and unwrapped on the source device.
    wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(
        self._input_dataset._variant_tensor)  # pylint: disable=protected-access

    @def_function.function()
    def _init_func():
      """Creates an iterator for the input dataset.

      Returns:
        A `string` tensor that encapsulates the iterator created.
      """
      ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
      resource = gen_dataset_ops.anonymous_iterator(
          **self._input_dataset._flat_structure)  # pylint: disable=protected-access
      with ops.control_dependencies(
          [gen_dataset_ops.make_iterator(ds_variant, resource)]):
        return gen_dataset_ops.iterator_to_string_handle(resource)

    init_func_concrete = _init_func.get_concrete_function()  # pylint: disable=protected-access

    @def_function.function()
    def _remote_init_func():
      # Runs _init_func on the source device and returns the iterator's
      # string handle to the caller.
      return functional_ops.remote_call(
          target=self._source_device,
          args=init_func_concrete.captured_inputs,
          Tout=[dtypes.string],
          f=init_func_concrete)

    self._init_func = _remote_init_func.get_concrete_function()  # pylint: disable=protected-access
    self._init_captured_args = self._init_func.captured_inputs

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _next_func(string_handle):
      """Calls get_next for created iterator.

      Args:
        string_handle: An iterator string handle created by _init_func
      Returns:
        The elements generated from `input_dataset`
      """
      with ops.device(self._source_device_string):
        iterator = iterator_ops.Iterator.from_string_handle(
            string_handle,
            dataset_ops.get_legacy_output_types(self),
            dataset_ops.get_legacy_output_shapes(self),
            dataset_ops.get_legacy_output_classes(self))
      return structure.to_tensor_list(self.element_spec, iterator.get_next())

    next_func_concrete = _next_func.get_concrete_function()  # pylint: disable=protected-access

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        experimental_attributes={"experimental_ints_on_device": True})
    def _remote_next_func(string_handle):
      # Runs _next_func on the source device to fetch the next element.
      return functional_ops.remote_call(
          target=self._source_device,
          args=[string_handle] + next_func_concrete.captured_inputs,
          Tout=self._input_dataset._flat_types,  # pylint: disable=protected-access
          f=next_func_concrete)

    self._next_func = _remote_next_func.get_concrete_function()
    self._next_captured_args = self._next_func.captured_inputs

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _finalize_func(string_handle):
      """Destroys the iterator resource created.

      Args:
        string_handle: An iterator string handle created by _init_func
      Returns:
        Tensor constant 0
      """
      iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
          string_handle,
          **self._input_dataset._flat_structure)  # pylint: disable=protected-access
      with ops.control_dependencies([
          resource_variable_ops.destroy_resource_op(
              iterator_resource, ignore_lookup_error=True)]):
        return array_ops.constant(0, dtypes.int64)

    finalize_func_concrete = _finalize_func.get_concrete_function()  # pylint: disable=protected-access

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
    def _remote_finalize_func(string_handle):
      # Runs _finalize_func on the source device to destroy the iterator.
      return functional_ops.remote_call(
          target=self._source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)

    self._finalize_func = _remote_finalize_func.get_concrete_function(  # pylint: disable=protected-access
    )
    self._finalize_captured_args = self._finalize_func.captured_inputs

    g = ops.get_default_graph()
    self._init_func.add_to_graph(g)
    self._next_func.add_to_graph(g)
    self._finalize_func.add_to_graph(g)
    # pylint: enable=protected-access

    # The GeneratorDataset on the target device drives the three remote
    # functions defined above.
    with ops.device(self._target_device):
      variant_tensor = gen_dataset_ops.generator_dataset(
          self._init_captured_args,
          self._next_captured_args,
          self._finalize_captured_args,
          init_func=self._init_func,
          next_func=self._next_func,
          finalize_func=self._finalize_func,
          **self._input_dataset._flat_structure)  # pylint: disable=protected-access
    super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)

  # The one_shot_iterator implementation needs a 0 arg _make_dataset function
  # that thereby captures all the inputs required to create the dataset. Since
  # there are strings that are inputs to the GeneratorDataset which can't be
  # placed on a GPU, this fails for the GPU case. Therefore, disabling it for
  # GPU
  def make_one_shot_iterator(self):
    if self._is_gpu_target:
      raise ValueError(
          "`make_one_shot_iterator` is not compatible with GPU execution. "
          "Please use `Dataset.make_initializable_iterator()` instead."
      )
    else:
      return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
232
+
233
+
234
class _MapOnGpuDataset(dataset_ops.UnaryDataset):
  """A `Dataset` that maps a function over its elements using a GPU."""

  def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True):
    """See `Dataset.map()` for details."""
    self._input_dataset = input_dataset
    self._use_inter_op_parallelism = use_inter_op_parallelism

    # Trace `map_func` with ints kept on-device so the traced function can
    # run on the GPU.
    self._map_func = structured_function.StructuredFunctionWrapper(
        map_func,
        self._transformation_name(),
        dataset=input_dataset,
        defun_kwargs={"experimental_ints_on_device": True})
    variant_tensor = ged_ops.experimental_map_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        self._map_func.function.captured_inputs,
        f=self._map_func.function,
        use_inter_op_parallelism=self._use_inter_op_parallelism,
        **self._flat_structure)
    super(_MapOnGpuDataset, self).__init__(input_dataset, variant_tensor)

  def _functions(self):
    # Expose the wrapped map function to tf.data internals.
    return [self._map_func]

  @property
  def element_spec(self):
    # Output structure is whatever the traced map function produces.
    return self._map_func.output_structure

  def _transformation_name(self):
    return "map_on_gpu()"
264
+
265
+
266
def map_on_gpu(map_func):
  """Maps `map_func` across the elements of this dataset.

  NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
  `map_func` on GPU. It must be used after applying the
  `tf.data.experimental.copy_to_device` transformation with a GPU device
  argument.

  Args:
    map_func: A function mapping a nested structure of tensors (having shapes
      and types defined by `self.output_shapes` and `self.output_types`) to
      another nested structure of tensors.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # _MapOnGpuDataset wires `map_func` into the experimental GPU map op.
    mapped = _MapOnGpuDataset(dataset, map_func)
    return mapped

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_access.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python API for random indexing into a dataset."""
16
+
17
+ from tensorflow.python.data.util import structure
18
+ from tensorflow.python.ops import gen_experimental_dataset_ops
19
+ from tensorflow.python.util.tf_export import tf_export
20
+
21
+
22
@tf_export("data.experimental.at", v1=[])
def at(dataset, index):
  """Returns the element at a specific index in a dataset.

  Currently, random access is supported for the following tf.data operations:

  - `tf.data.Dataset.from_tensor_slices`,
  - `tf.data.Dataset.from_tensors`,
  - `tf.data.Dataset.shuffle`,
  - `tf.data.Dataset.batch`,
  - `tf.data.Dataset.shard`,
  - `tf.data.Dataset.map`,
  - `tf.data.Dataset.range`,
  - `tf.data.Dataset.zip`,
  - `tf.data.Dataset.skip`,
  - `tf.data.Dataset.repeat`,
  - `tf.data.Dataset.list_files`,
  - `tf.data.Dataset.SSTableDataset`,
  - `tf.data.Dataset.concatenate`,
  - `tf.data.Dataset.enumerate`,
  - `tf.data.Dataset.parallel_map`,
  - `tf.data.Dataset.prefetch`,
  - `tf.data.Dataset.take`,
  - `tf.data.Dataset.cache` (in-memory only)

  Users can use the cache operation to enable random access for any dataset,
  even one comprised of transformations which are not on this list.
  E.g., to get the third element of a TFDS dataset:

  ```python
  ds = tfds.load("mnist", split="train").cache()
  elem = tf.data.experimental.at(ds, 3)
  ```

  Args:
    dataset: A `tf.data.Dataset` to index into.
    index: The index at which to fetch the element.

  Returns:
    A (nested) structure of values matching `tf.data.Dataset.element_spec`.

  Raises:
    UnimplementedError: If random access is not yet supported for a dataset.
  """
  element_spec = dataset.element_spec
  # pylint: disable=protected-access
  flat_values = gen_experimental_dataset_ops.get_element_at_index(
      dataset._variant_tensor,
      index,
      output_types=structure.get_flat_tensor_types(element_spec),
      output_shapes=structure.get_flat_tensor_shapes(element_spec))
  # Reassemble the flat tensor list into the dataset's (nested) structure.
  return structure.from_tensor_list(element_spec, flat_values)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/random_ops.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Datasets for random number generators."""
16
+ import functools
17
+
18
+ from tensorflow.python import tf2
19
+ from tensorflow.python.compat import v2_compat
20
+ from tensorflow.python.data.ops import dataset_ops
21
+ from tensorflow.python.data.ops import random_op
22
+ from tensorflow.python.util import deprecation
23
+ from tensorflow.python.util.tf_export import tf_export
24
+
25
+
26
+ # TODO(b/260143413): Migrate users to `tf.data.Dataset.random`.
27
# Deprecated public alias over the internal random dataset implementation.
@deprecation.deprecated(None, "Use `tf.data.Dataset.random(...)`.")
@tf_export("data.experimental.RandomDataset", v1=[])
class RandomDatasetV2(random_op._RandomDataset):  # pylint: disable=protected-access
  """A `Dataset` of pseudorandom values."""
31
+
32
+
33
@deprecation.deprecated(None, "Use `tf.data.Dataset.random(...)`.")
@tf_export(v1=["data.experimental.RandomDataset"])
class RandomDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` of pseudorandom values."""

  @functools.wraps(RandomDatasetV2.__init__)
  def __init__(self, seed=None):
    # Wrap the V2 dataset so it exposes the TF1 `Dataset` API surface.
    wrapped = RandomDatasetV2(seed)
    super(RandomDatasetV1, self).__init__(wrapped)
42
+
43
+
44
def _tf2_callback():
  """Points `RandomDataset` at the V1 or V2 class to match TF2 mode."""
  global RandomDataset
  RandomDataset = RandomDatasetV2 if tf2.enabled() else RandomDatasetV1


# Set the initial alias, and re-run the same selection whenever TF2 mode is
# toggled via v2_compat. (Previously the selection logic was duplicated
# between module scope and the callback.)
_tf2_callback()
v2_compat.register_data_v2_callback(_tf2_callback)