ZTWHHH committed on
Commit
c175bc4
·
verified ·
1 Parent(s): 2b2cee7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  3. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/batching.cpython-310.pyc +0 -0
  5. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/cardinality.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/compression_ops.cpython-310.pyc +0 -0
  7. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/counter.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/data_service_ops.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distribute.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distributed_save_op.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/enumerate_ops.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/error_ops.cpython-310.pyc +0 -0
  13. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/from_list.cpython-310.pyc +0 -0
  14. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/get_single_element.cpython-310.pyc +0 -0
  15. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/grouping.cpython-310.pyc +0 -0
  16. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/interleave_ops.cpython-310.pyc +0 -0
  17. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/io.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/iterator_ops.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/lookup_ops.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/map_defun.cpython-310.pyc +0 -0
  21. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/matching_files.cpython-310.pyc +0 -0
  22. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/pad_to_cardinality.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/parsing_ops.cpython-310.pyc +0 -0
  24. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/prefetching_ops.cpython-310.pyc +0 -0
  25. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_access.cpython-310.pyc +0 -0
  26. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_ops.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/readers.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/resampling.cpython-310.pyc +0 -0
  29. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/scan_ops.cpython-310.pyc +0 -0
  30. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/shuffle_ops.cpython-310.pyc +0 -0
  31. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/snapshot.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/take_while_ops.cpython-310.pyc +0 -0
  33. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/testing.cpython-310.pyc +0 -0
  34. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/unique.cpython-310.pyc +0 -0
  35. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/writers.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/data_service_ops.py +1176 -0
  37. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/error_ops.py +51 -0
  38. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/get_single_element.py +109 -0
  39. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/grouping.py +428 -0
  40. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/lookup_ops.py +238 -0
  41. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/map_defun.py +65 -0
  42. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/matching_files.py +35 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/scan_ops.py +45 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/snapshot.py +276 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/unique.py +43 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/writers.py +126 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__init__.py +426 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/__init__.cpython-310.pyc +0 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/server_lib.cpython-310.pyc +0 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_server_lib.pyi +54 -0
.gitattributes CHANGED
@@ -882,3 +882,7 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_tr
882
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
883
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
884
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
882
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
883
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/ragged/__pycache__/ragged_tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
884
  videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/gen_nn_ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
885
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/__pycache__/metrics_impl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
886
+ videochat2/lib/python3.10/site-packages/tensorflow/python/ops/parallel_for/__pycache__/pfor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
887
+ videochat2/lib/python3.10/site-packages/tensorflow/python/lib/io/_pywrap_record_io.so filter=lfs diff=lfs merge=lfs -text
888
+ videochat2/lib/python3.10/site-packages/tensorflow/python/lib/core/_pywrap_py_func.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.4 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/batching.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/cardinality.cpython-310.pyc ADDED
Binary file (4.12 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/compression_ops.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/counter.cpython-310.pyc ADDED
Binary file (2.5 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/data_service_ops.cpython-310.pyc ADDED
Binary file (49.1 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distribute.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/distributed_save_op.cpython-310.pyc ADDED
Binary file (1.88 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/enumerate_ops.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/error_ops.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/from_list.cpython-310.pyc ADDED
Binary file (4.49 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/get_single_element.cpython-310.pyc ADDED
Binary file (3.81 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/grouping.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/interleave_ops.cpython-310.pyc ADDED
Binary file (8.73 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/io.cpython-310.pyc ADDED
Binary file (6.22 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/iterator_ops.cpython-310.pyc ADDED
Binary file (3.26 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/lookup_ops.cpython-310.pyc ADDED
Binary file (8.67 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/map_defun.cpython-310.pyc ADDED
Binary file (2.24 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/matching_files.cpython-310.pyc ADDED
Binary file (1.33 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/pad_to_cardinality.cpython-310.pyc ADDED
Binary file (3.65 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/parsing_ops.cpython-310.pyc ADDED
Binary file (5.81 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/prefetching_ops.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_access.cpython-310.pyc ADDED
Binary file (2.28 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/random_ops.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/readers.cpython-310.pyc ADDED
Binary file (40.7 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/resampling.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/scan_ops.cpython-310.pyc ADDED
Binary file (1.54 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/shuffle_ops.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/snapshot.cpython-310.pyc ADDED
Binary file (9.98 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/take_while_ops.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/testing.cpython-310.pyc ADDED
Binary file (6.81 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/unique.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/__pycache__/writers.cpython-310.pyc ADDED
Binary file (4.49 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/data_service_ops.py ADDED
@@ -0,0 +1,1176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python API for executing a tf.data.Dataset using a tf.data service."""
16
+
17
+ import enum
18
+ import functools
19
+ from typing import Callable
20
+
21
+ from tensorflow.core.protobuf import data_service_pb2
22
+ from tensorflow.python import tf2
23
+ from tensorflow.python.data.experimental.ops import compression_ops
24
+ from tensorflow.python.data.experimental.service import _pywrap_server_lib
25
+ from tensorflow.python.data.experimental.service import _pywrap_utils
26
+ from tensorflow.python.data.ops import dataset_ops
27
+ from tensorflow.python.data.ops import options as options_lib
28
+ from tensorflow.python.data.ops import structured_function
29
+ from tensorflow.python.data.ops.options import AutoShardPolicy
30
+ from tensorflow.python.data.ops.options import ExternalStatePolicy
31
+ from tensorflow.python.eager import context
32
+ from tensorflow.python.framework import dtypes
33
+ from tensorflow.python.framework import ops
34
+ from tensorflow.python.framework import tensor
35
+ from tensorflow.python.framework import tensor_util
36
+ from tensorflow.python.ops import gen_experimental_dataset_ops
37
+ from tensorflow.python.ops import string_ops
38
+ from tensorflow.python.saved_model import nested_structure_coder
39
+ from tensorflow.python.util.tf_export import tf_export
40
+
41
+ COMPRESSION_AUTO = "AUTO"
42
+ COMPRESSION_NONE = None
43
+ _PARALLEL_EPOCHS = "parallel_epochs"
44
+ _DISTRIBUTED_EPOCH = "distributed_epoch"
45
+
46
+
47
@tf_export("data.experimental.service.ShardingPolicy")
class ShardingPolicy(enum.IntEnum):
  """Specifies how to shard data among tf.data service workers.

  OFF: No sharding will be performed. Each worker produces the entire dataset
  without any sharding. With this mode, the best practice is to shuffle the
  dataset nondeterministically so that workers process the dataset in different
  orders. If workers are restarted or join the cluster mid-job, they will begin
  processing the dataset from the beginning.

  DYNAMIC: The input dataset is dynamically split among workers at runtime. Each
  worker gets the next split when it reads data from the dispatcher. Data is
  produced non-deterministically in this mode. Dynamic sharding works well with
  varying-sized tf.data service clusters, e.g., when you need to auto-scale your
  workers. Dynamic sharding provides at-most once visitation guarantees. No
  examples will be repeated, but some may be missed if a tf.data service worker
  gets restarted while processing a file.

  The following are static sharding policies. The semantics are similar to
  `tf.data.experimental.AutoShardPolicy`. These policies require:
  * The tf.data service cluster is configured with a fixed list of workers
    in DispatcherConfig.
  * Each client only reads from the local tf.data service worker.

  If a worker is restarted while performing static sharding, the worker will
  begin processing its shard again from the beginning.

  FILE: Shards by input files (i.e. each worker will get a fixed set of files to
  process). When this option is selected, make sure that there is at least as
  many files as workers. If there are fewer input files than workers, a runtime
  error will be raised.

  DATA: Shards by elements produced by the dataset. Each worker will process the
  whole dataset and discard the portion that is not for itself. Note that for
  this mode to correctly partition the dataset elements, the dataset needs to
  produce elements in a deterministic order.

  FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based
  sharding on failure.

  HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
  placeholder to replace with `shard(num_workers, worker_index)`.
  """

  # LINT.IfChange(tf_data_service_sharding_policy)
  OFF = 0
  DYNAMIC = 1
  FILE = 2
  DATA = 3
  FILE_OR_DATA = 4
  HINT = 5
  # LINT.ThenChange()

  def _to_proto(self) -> data_service_pb2.ProcessingModeDef.ShardingPolicy:
    """Converts the policy to ProcessingModeDef proto enum."""
    mode_def = data_service_pb2.ProcessingModeDef
    # Table-driven mapping keeps the Python and proto enums visibly in sync.
    proto_by_policy = {
        ShardingPolicy.OFF: mode_def.OFF,
        ShardingPolicy.DYNAMIC: mode_def.DYNAMIC,
        ShardingPolicy.FILE: mode_def.FILE,
        ShardingPolicy.DATA: mode_def.DATA,
        ShardingPolicy.FILE_OR_DATA: mode_def.FILE_OR_DATA,
        ShardingPolicy.HINT: mode_def.HINT,
    }
    try:
      return proto_by_policy[self]
    except KeyError:
      raise ValueError(
          f"Unable to convert sharding policy {self!r} to proto.") from None
116
+
117
+
118
@tf_export("data.experimental.service.CrossTrainerCache")
class CrossTrainerCache:
  """Options related to the tf.data service cross trainer cache.

  This is used to enable cross-trainer cache when distributing a dataset. For
  example:

  ```
  dataset = dataset.apply(tf.data.experimental.service.distribute(
      processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
      service=FLAGS.tf_data_service_address,
      job_name="job",
      cross_trainer_cache=data_service_ops.CrossTrainerCache(
          trainer_id=trainer_id())))
  ```

  For more details, refer to
  https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers.
  """

  def __init__(self, trainer_id):
    """Constructs a CrossTrainerCache.

    Args:
      trainer_id: Each training job has a unique ID. Once a job has consumed
      data, the data remains in the cache and is re-used by jobs with different
      `trainer_id`s. Requests with the same `trainer_id` do not re-use data.

    Raises:
      ValueError if `trainer_id` is empty.
    """
    # Reject falsy IDs (empty string / None) up front; a missing trainer ID
    # would defeat the per-trainer reuse semantics of the cache.
    if trainer_id:
      self.trainer_id = trainer_id
      return
    raise ValueError(
        "tf.data service cross-trainer cache requires a non-empty trainer ID."
    )

  def _to_proto(self) -> data_service_pb2.CrossTrainerCacheOptions:
    # Serialization hook used when building the dataset op's options.
    return data_service_pb2.CrossTrainerCacheOptions(trainer_id=self.trainer_id)
157
+
158
+
159
def _get_validated_sharding_policy(processing_mode) -> ShardingPolicy:
  """Validates `processing_mode` and converts it to ShardingPolicy."""
  if isinstance(processing_mode, ShardingPolicy):
    return processing_mode
  # Legacy string spellings map onto the modern enum. Equality comparison (not
  # dict lookup) keeps behavior identical for unhashable inputs.
  for legacy_name, policy in ((_PARALLEL_EPOCHS, ShardingPolicy.OFF),
                              (_DISTRIBUTED_EPOCH, ShardingPolicy.DYNAMIC)):
    if processing_mode == legacy_name:
      return policy
  raise ValueError("tf.data service processing mode should be a "
                   "`tf.data.experimental.service.ShardingPolicy`, "
                   "`\"parallel_epochs\"`, or `\"distributed_epoch\"`. Got "
                   f"{processing_mode!r}.")
173
+
174
+
175
+ def _validate_job_name(job_name) -> None:
176
+ if job_name is None:
177
+ return
178
+ if not isinstance(job_name, str):
179
+ raise ValueError("`job_name` must be a string, but `job_name` was of type "
180
+ f"{type(job_name)}. job_name={job_name}")
181
+ if not job_name:
182
+ raise ValueError("`job_name` must not be empty")
183
+
184
+
185
def _validate_compression(compression) -> None:
  """Raises ValueError unless `compression` is a supported setting."""
  valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]
  if compression in valid_compressions:
    return
  raise ValueError(f"Invalid `compression` argument: {compression}. "
                   f"Must be one of {valid_compressions}.")
190
+
191
+
192
def _get_compression_proto(
    compression) -> data_service_pb2.DataServiceMetadata.Compression:
  """Maps a user-facing compression setting onto its proto enum value."""
  if compression == COMPRESSION_NONE:
    return data_service_pb2.DataServiceMetadata.COMPRESSION_OFF
  if compression == COMPRESSION_AUTO:
    # "AUTO" currently selects Snappy compression.
    return data_service_pb2.DataServiceMetadata.COMPRESSION_SNAPPY
  raise ValueError(f"Invalid `compression` argument: {compression}. "
                   f"Must be one of {[COMPRESSION_AUTO, COMPRESSION_NONE]}.")
200
+
201
+
202
def _to_tensor(dataset_id) -> tensor.Tensor:
  """Converts `dataset_id` to a Tensor (string dtype for str/bytes IDs)."""
  if isinstance(dataset_id, tensor.Tensor):
    return dataset_id
  # String-like IDs become string tensors; everything else is treated as an
  # int64 dataset ID.
  dtype = (dtypes.string
           if isinstance(dataset_id, (str, bytes)) else dtypes.int64)
  return ops.convert_to_tensor(dataset_id, dtype=dtype, name="dataset_id")
212
+
213
+
214
def _to_string(dataset_id) -> str:
  """Converts `dataset_id` to a string (or a string Tensor for tensor input)."""
  if isinstance(dataset_id, tensor.Tensor):
    # Already-string tensors pass through; numeric tensors are stringified
    # with the TF op so the conversion stays in the graph.
    if dataset_id.dtype == dtypes.string:
      return dataset_id
    return string_ops.as_string(dataset_id)
  if isinstance(dataset_id, bytes):
    return dataset_id.decode()
  return str(dataset_id)
222
+
223
+
224
+ class _DataServiceDatasetV2(dataset_ops.DatasetSource):
225
+ """A `Dataset` that reads elements from the tf.data service."""
226
+
227
+ def __init__(self,
228
+ dataset_id,
229
+ processing_mode,
230
+ address,
231
+ element_spec,
232
+ protocol,
233
+ data_transfer_protocol,
234
+ job_name=None,
235
+ consumer_index=None,
236
+ num_consumers=None,
237
+ max_outstanding_requests=None,
238
+ task_refresh_interval_hint_ms=None,
239
+ cross_trainer_cache=None,
240
+ target_workers="AUTO"):
241
+ """Constructs a _DataServiceDatasetV2.
242
+
243
+ Args:
244
+ dataset_id: The dataset id for the dataset to read from.
245
+ processing_mode: A `tf.data.experimental.service.ShardingPolicy`
246
+ specifying how to shard the dataset among tf.data workers. See
247
+ `tf.data.experimental.service.ShardingPolicy` for details. For backwards
248
+ compatibility, `processing_mode` may also be set to the strings
249
+ `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
250
+ equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
251
+ address: The tf.data service address, e.g. "localhost:5000".
252
+ element_spec: The dataset element spec for the dataset to read from.
253
+ protocol: The protocol to use for communicating with the tf.data service,
254
+ e.g. "grpc".
255
+ data_transfer_protocol: (Optional.) The protocol to use for transferring
256
+ data with the tf.data service. By default, data is transferred using
257
+ gRPC.
258
+ job_name: (Optional.) The name of the job. If provided, it must be a
259
+ non-empty string or Tensor. This argument makes it possible for multiple
260
+ datasets to share the same job. The default behavior is that the dataset
261
+ creates anonymous, exclusively owned jobs.
262
+ consumer_index: (Optional.) The index of the consumer in the range from
263
+ `0` to `num_consumers`. Must be specified alongside `num_consumers`.
264
+ When specified, consumers will read from the job in a strict round-robin
265
+ order, instead of the default first-come-first-served order.
266
+ num_consumers: (Optional.) The number of consumers which will consume from
267
+ the job. Must be specified alongside `consumer_index`. When specified,
268
+ consumers will read from the job in a strict round-robin order, instead
269
+ of the default first-come-first-served order. When `num_consumers` is
270
+ specified, the dataset must have infinite cardinality to prevent a
271
+ producer from running out of data early and causing consumers to go out
272
+ of sync.
273
+ max_outstanding_requests: (Optional.) A limit on how many elements may be
274
+ requested at the same time. You can use this option to control the
275
+ amount of memory used, since `distribute` won't use more than
276
+ `element_size` * `max_outstanding_requests` of memory.
277
+ task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
278
+ the dispatcher for task changes.
279
+ cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
280
+ provided, dataset iteration will be shared across concurrently running
281
+ trainers. See
282
+ https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
283
+ for details.
284
+ target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
285
+ tf.data runtime decides which workers to read from. If `"ANY"`, reads
286
+ from any tf.data service workers. If `"LOCAL"`, only reads from local
287
+ in-processs tf.data service workers. `"AUTO"` works well for most cases,
288
+ while users can specify other targets. For example, `"LOCAL"` helps
289
+ avoid RPCs and data copy if every TF worker colocates with a tf.data
290
+ service worker. Consumers of a shared job must use the same
291
+ `target_workers`. Defaults to `"AUTO"`.
292
+ """
293
+ if consumer_index is None != num_consumers is None:
294
+ raise ValueError(
295
+ "Must either set both `consumer_index` and `num_consumers`, "
296
+ "or neither. ",
297
+ f"consumer_index={consumer_index}, num_consumers={num_consumers}")
298
+ if num_consumers is not None and job_name is None:
299
+ raise ValueError("`job_name` must be set when setting `num_consumers`. "
300
+ f"num_consumers was set to {num_consumers}.")
301
+
302
+ processing_mode_def = data_service_pb2.ProcessingModeDef(
303
+ sharding_policy=_get_validated_sharding_policy(
304
+ processing_mode)._to_proto())
305
+ if job_name is None:
306
+ job_name = ""
307
+ if max_outstanding_requests is None:
308
+ max_outstanding_requests = dataset_ops.AUTOTUNE
309
+ if task_refresh_interval_hint_ms is None:
310
+ task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE
311
+
312
+ self._dataset_id = _to_tensor(dataset_id)
313
+ self._processing_mode = ops.convert_to_tensor(
314
+ processing_mode_def.SerializeToString(),
315
+ dtype=dtypes.string,
316
+ name="processing_mode")
317
+ self._address = ops.convert_to_tensor(
318
+ address, dtype=dtypes.string, name="address")
319
+ self._protocol = ops.convert_to_tensor(
320
+ protocol, dtype=dtypes.string, name="protocol")
321
+ self._job_name = ops.convert_to_tensor(
322
+ job_name, dtype=dtypes.string, name="job_name")
323
+ self._consumer_index = ops.convert_to_tensor(
324
+ -1 if consumer_index is None else consumer_index,
325
+ dtype=dtypes.int64,
326
+ name="consumer_index")
327
+ self._num_consumers = ops.convert_to_tensor(
328
+ -1 if num_consumers is None else num_consumers,
329
+ dtype=dtypes.int64,
330
+ name="num_consumers")
331
+ self._max_outstanding_requests = ops.convert_to_tensor(
332
+ max_outstanding_requests,
333
+ dtype=dtypes.int64,
334
+ name="max_outstanding_requests")
335
+ self._element_spec = element_spec
336
+ uncompress_func = structured_function.StructuredFunctionWrapper(
337
+ lambda x: compression_ops.uncompress(x, output_spec=element_spec),
338
+ transformation_name="DataServiceDataset.uncompress()",
339
+ input_structure=tensor.TensorSpec(shape=(), dtype=dtypes.variant))
340
+ cross_trainer_cache_options = (
341
+ cross_trainer_cache._to_proto().SerializeToString()
342
+ if cross_trainer_cache else None)
343
+
344
+ compat_kwargs = {}
345
+ if data_transfer_protocol is not None:
346
+ compat_kwargs["data_transfer_protocol"] = data_transfer_protocol
347
+
348
+ # If `uncompress` is `True`, the dataset will query the servers to find
349
+ # out the actual compression used. It is always set to `True` the first
350
+ # time the graph is built, and set to false when serializing, so we will
351
+ # uncompress at most once.
352
+ uncompress = True
353
+ variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v4(
354
+ dataset_id=self._dataset_id,
355
+ processing_mode=self._processing_mode,
356
+ address=self._address,
357
+ protocol=self._protocol,
358
+ job_name=self._job_name,
359
+ consumer_index=self._consumer_index,
360
+ num_consumers=self._num_consumers,
361
+ max_outstanding_requests=self._max_outstanding_requests,
362
+ task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
363
+ iteration_counter=(
364
+ gen_experimental_dataset_ops.dummy_iteration_counter()),
365
+ target_workers=target_workers,
366
+ uncompress=uncompress,
367
+ uncompress_fn=uncompress_func.function,
368
+ cross_trainer_cache_options=cross_trainer_cache_options,
369
+ **compat_kwargs,
370
+ **self._flat_structure)
371
+ super(_DataServiceDatasetV2, self).__init__(variant_tensor)
372
+
373
@property
def element_spec(self):
  """The `tf.TypeSpec` structure describing elements of this dataset."""
  return self._element_spec
376
+
377
+
378
class _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` that executes its input through the tf.data service."""

  @functools.wraps(_DataServiceDatasetV2.__init__)
  def __init__(self, dataset_id, processing_mode, address, element_spec,
               protocol, data_transfer_protocol, job_name, consumer_index,
               num_consumers, max_outstanding_requests,
               task_refresh_interval_hint_ms, cross_trainer_cache,
               target_workers):
    # All configuration is delegated to the V2 dataset; this class only
    # adapts it to the V1 (graph-mode) Dataset API.
    v2_kwargs = dict(
        dataset_id=dataset_id,
        processing_mode=processing_mode,
        address=address,
        element_spec=element_spec,
        protocol=protocol,
        data_transfer_protocol=data_transfer_protocol,
        job_name=job_name,
        consumer_index=consumer_index,
        num_consumers=num_consumers,
        max_outstanding_requests=max_outstanding_requests,
        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
        cross_trainer_cache=cross_trainer_cache,
        target_workers=target_workers)
    self._wrapped = _DataServiceDatasetV2(**v2_kwargs)
    super().__init__(self._wrapped)
403
+
404
+
405
# Pick the TF1 or TF2 implementation based on the runtime mode.
_DataServiceDataset = (
    _DataServiceDatasetV2 if tf2.enabled() else _DataServiceDatasetV1)
409
+
410
+
411
+ def _parse_service(service) -> tuple[str, str]:
412
+ """Converts a tf.data service string into a (protocol, address) tuple.
413
+
414
+ Args:
415
+ service: A string in the format "protocol://address" or just "address". If
416
+ the string is only an address, the default protocol will be used.
417
+
418
+ Returns:
419
+ The (protocol, address) tuple
420
+ """
421
+ if not isinstance(service, str):
422
+ raise ValueError("`service` must be a string, but `service` was of type "
423
+ f"{type(service)}. service={service}")
424
+ if not service:
425
+ raise ValueError("`service` must not be empty")
426
+ parts = service.split("://")
427
+ if len(parts) == 2:
428
+ protocol, address = parts
429
+ elif len(parts) == 1:
430
+ address = parts[0]
431
+ protocol = _pywrap_utils.TF_DATA_DefaultProtocol()
432
+ else:
433
+ raise ValueError("Malformed `service` string has multiple '://': "
434
+ f"{service}.")
435
+ # TODO(aaudibert): Considering validating reachability of address here.
436
+ return (protocol, address)
437
+
438
+
439
def _distribute(
    processing_mode,
    service,
    job_name=None,
    consumer_index=None,
    num_consumers=None,
    max_outstanding_requests=None,
    task_refresh_interval_hint_ms=None,
    data_transfer_protocol=None,
    compression="AUTO",
    cross_trainer_cache=None,
    target_workers="AUTO",
) -> Callable[[dataset_ops.Dataset], dataset_ops.Dataset]:
  # NOTE(fix): the annotation was `Callable[dataset_ops.Dataset, ...]`, which
  # is malformed -- `typing.Callable` requires the argument types as a list
  # (`Callable[[A], R]`) and raises TypeError when evaluated otherwise.
  """A transformation that moves dataset processing to the tf.data service.

  This transformation is similar to `distribute`, but supports additional
  parameters which we do not yet want to add to the public Python API.

  Args:
    processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying
      how to shard the dataset among tf.data workers. See
      `tf.data.experimental.service.ShardingPolicy` for details. For backwards
      compatibility, `processing_mode` may also be set to the strings
      `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
      equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally be used to override the default
      protocol to use. If it's a tuple, it should be (protocol, address).
    job_name: (Optional.) The name of the job. If provided, it must be a
      non-empty string. This argument makes it possible for multiple datasets to
      share the same job. The default behavior is that the dataset creates
      anonymous, exclusively owned jobs.
    consumer_index: (Optional.) The index of the consumer in the range from `0`
      to `num_consumers`. Must be specified alongside `num_consumers`. When
      specified, consumers will read from the job in a strict round-robin order,
      instead of the default first-come-first-served order.
    num_consumers: (Optional.) The number of consumers which will consume from
      the job. Must be specified alongside `consumer_index`. When specified,
      consumers will read from the job in a strict round-robin order, instead of
      the default first-come-first-served order. When `num_consumers` is
      specified, the dataset must have infinite cardinality to prevent a
      producer from running out of data early and causing consumers to go out of
      sync.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
    task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
      dispatcher for task changes.
    data_transfer_protocol: (Optional.) The protocol to use for transferring
      data with the tf.data service. By default, data is transferred using gRPC.
    compression: How to compress the dataset's elements before transferring them
      over the network. "AUTO" leaves the decision of how to compress up to the
      tf.data service runtime. `None` indicates not to compress.
    cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
      provided, dataset iteration will be shared across concurrently running
      trainers. See
      https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
      for details.
    target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
      runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
      tf.data service workers. `"AUTO"` works well for most cases, while users
      can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
      data copy if every TF worker colocates with a tf.data service worker.
      Consumers of a shared job must use the same `target_workers`. Defaults to
      `"AUTO"`.

  Returns:
    Dataset: A `Dataset` of the elements produced by the data service.
  """
  processing_mode = _get_validated_sharding_policy(processing_mode)
  _validate_compression(compression)

  def _apply_fn(dataset) -> dataset_ops.Dataset:  # pylint: disable=missing-docstring
    # Register the input pipeline with the dispatcher, then build a reader
    # dataset for the returned id.
    dataset_id = _register_dataset(service, dataset, compression=compression)
    return _from_dataset_id(
        processing_mode,
        service,
        dataset_id,
        dataset.element_spec,
        job_name=job_name,
        consumer_index=consumer_index,
        num_consumers=num_consumers,
        max_outstanding_requests=max_outstanding_requests,
        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
        data_transfer_protocol=data_transfer_protocol,
        cross_trainer_cache=cross_trainer_cache,
        target_workers=target_workers)

  return _apply_fn
532
+
533
+
534
@tf_export("data.experimental.service.distribute")
def distribute(
    processing_mode,
    service,
    job_name=None,
    consumer_index=None,
    num_consumers=None,
    max_outstanding_requests=None,
    data_transfer_protocol=None,
    compression="AUTO",
    cross_trainer_cache=None,
    target_workers="AUTO",
) -> Callable[[dataset_ops.Dataset], dataset_ops.Dataset]:
  # NOTE(fix): the annotation was `Callable[dataset_ops.Dataset, ...]`, which
  # is malformed -- `typing.Callable` requires the argument types as a list
  # (`Callable[[A], R]`) and raises TypeError when evaluated otherwise.
  """A transformation that moves dataset processing to the tf.data service.

  When you iterate over a dataset containing the `distribute` transformation,
  the tf.data service creates a "job" which produces data for the dataset
  iteration.

  The tf.data service uses a cluster of workers to prepare data for training
  your model.
  The `processing_mode` argument to `tf.data.experimental.service.distribute`
  describes how to leverage multiple workers to process the input dataset.
  Currently, there are two processing modes to choose from: "distributed_epoch"
  and "parallel_epochs".

  "distributed_epoch" means that the dataset will be split across all tf.data
  service workers.
  The dispatcher produces "splits" for the dataset and sends them to workers for
  further processing. For example, if a dataset begins with a list of filenames,
  the dispatcher will iterate through the filenames and send the filenames to
  tf.data workers, which will perform the rest of the dataset transformations on
  those files. "distributed_epoch" is useful when your model needs to see each
  element of the dataset exactly once, or if it needs to see the data in a
  generally-sequential order. "distributed_epoch" only works for datasets with
  splittable sources, such as `Dataset.from_tensor_slices`,
  `Dataset.list_files`, or `Dataset.range`.

  "parallel_epochs" means that the entire input dataset will be processed
  independently by each of the tf.data service workers.
  For this reason, it is important to shuffle data (e.g. filenames)
  non-deterministically, so that each worker will process the elements of the
  dataset in a different order. "parallel_epochs" can be used to distribute
  datasets that aren't splittable.

  With two workers, "parallel_epochs" will produce every element of the dataset
  twice:

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> # Start two workers
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
  ...     processing_mode="parallel_epochs", service=dispatcher.target))
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]

  "distributed_epoch", on the other hand, will still produce each element once:

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
  ...     processing_mode="distributed_epoch", service=dispatcher.target))
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

  When using `apply(tf.data.experimental.service.distribute(...))`, the dataset
  before the `apply` transformation executes within the tf.data service, while
  the operations after `apply` happen within the local process.

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> workers = [
  ...     tf.data.experimental.service.WorkerServer(
  ...         tf.data.experimental.service.WorkerConfig(
  ...             dispatcher_address=dispatcher_address)) for _ in range(2)
  ... ]
  >>> dataset = tf.data.Dataset.range(5)
  >>> dataset = dataset.map(lambda x: x*x)
  >>> dataset = dataset.apply(
  ...    tf.data.experimental.service.distribute("parallel_epochs",
  ...                                            dispatcher.target))
  >>> dataset = dataset.map(lambda x: x+1)
  >>> print(sorted(list(dataset.as_numpy_iterator())))
  [1, 1, 2, 2, 5, 5, 10, 10, 17, 17]

  In the above example, the dataset operations (before applying the `distribute`
  function on the elements) will be executed on the tf.data workers,
  and the elements are provided over RPC. The remaining transformations
  (after the call to `distribute`) will be executed locally. The dispatcher
  and the workers will bind to unused free ports (which are chosen at random),
  in order to communicate with each other. However, to bind them to specific
  ports, the `port` parameter can be passed.

  The `job_name` argument allows jobs to be shared across multiple
  datasets. Instead of each dataset creating its own job, all
  datasets with the same `job_name` will consume from the same job. A new job
  will be created for each iteration of the dataset (with each repetition of
  `Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`
  is serving on `localhost:5000` and two training workers (in either a single
  client or multi-client setup) iterate over the below dataset, and there is a
  single tf.data worker:

  ```
  range5_dataset = tf.data.Dataset.range(5)
  dataset = range5_dataset.apply(tf.data.experimental.service.distribute(
      "parallel_epochs", "localhost:5000", job_name="my_job_name"))
  for iteration in range(3):
    print(list(dataset))
  ```

  The elements of each job will be split between the two processes, with
  elements being consumed by the processes on a first-come first-served basis.
  One possible result is that process 1 prints

  ```
  [0, 2, 4]
  [0, 1, 3]
  [1]
  ```

  and process 2 prints

  ```
  [1, 3]
  [2, 4]
  [0, 2, 3, 4]
  ```

  Job names must not be re-used across different training jobs within the
  lifetime of the tf.data service. In general, the tf.data service is expected
  to live for the duration of a single training job.
  To use the tf.data service with multiple training jobs, make sure to use
  different job names to avoid conflicts. For example, suppose a training job
  calls `distribute` with `job_name="job"` and reads until end of input. If
  another independent job connects to the same tf.data service and tries to read
  from `job_name="job"`, it will immediately receive end of input, without
  getting any data.

  **Coordinated data read**

  By default, when multiple consumers read from the same job, they receive data
  on a first-come first-served basis. In some use cases, it is advantageous to
  coordinate the consumers. At each step, consumers read data from the same
  worker.

  For example, the tf.data service can be used to coordinate example sizes
  across a cluster during synchronous training, so that during each step all
  replicas train on similar-sized elements. To achieve this, define a dataset
  which generates rounds of `num_consumers` consecutive similar-sized batches,
  then enable coordinated reads by setting `consumer_index` and `num_consumers`.

  NOTE: To keep consumers in sync, round robin data consumption requires that
  the dataset have infinite cardinality. You can get this by adding `.repeat()`
  at the end of the dataset definition.

  **Keras and Distribution Strategies**

  The dataset produced by the `distribute` transformation can be passed to
  Keras' `Model.fit` or Distribution Strategy's
  `tf.distribute.Strategy.experimental_distribute_dataset` like any other
  `tf.data.Dataset`. We recommend setting a `job_name` on the call to
  `distribute` so that if there are multiple workers, they read data from the
  same job. Note that the autosharding normally performed by
  `experimental_distribute_dataset` will be disabled when setting a `job_name`,
  since sharing the job already results in splitting data across the workers.
  When using a shared job, data will be dynamically balanced across workers, so
  that they reach end of input about the same time. This results in better
  worker utilization than with autosharding, where each worker processes an
  independent set of files, and some workers may run out of data earlier than
  others.

  Args:
    processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying
      how to shard the dataset among tf.data workers. See
      `tf.data.experimental.service.ShardingPolicy` for details. For backwards
      compatibility, `processing_mode` may also be set to the strings
      `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
      equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally be used to override the default
      protocol to use. If it's a tuple, it should be (protocol, address).
    job_name: (Optional.) The name of the job. If provided, it must be a
      non-empty string. This argument makes it possible for multiple datasets to
      share the same job. The default behavior is that the dataset creates
      anonymous, exclusively owned jobs.
    consumer_index: (Optional.) The index of the consumer in the range from `0`
      to `num_consumers`. Must be specified alongside `num_consumers`. When
      specified, consumers will read from the job in a strict round-robin order,
      instead of the default first-come-first-served order.
    num_consumers: (Optional.) The number of consumers which will consume from
      the job. Must be specified alongside `consumer_index`. When specified,
      consumers will read from the job in a strict round-robin order, instead of
      the default first-come-first-served order. When `num_consumers` is
      specified, the dataset must have infinite cardinality to prevent a
      producer from running out of data early and causing consumers to go out of
      sync.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. You can use this option to control the amount
      of memory used, since `distribute` won't use more than `element_size` *
      `max_outstanding_requests` of memory.
    data_transfer_protocol: (Optional.) The protocol to use for transferring
      data with the tf.data service. By default, data is transferred using gRPC.
    compression: How to compress the dataset's elements before transferring them
      over the network. "AUTO" leaves the decision of how to compress up to the
      tf.data service runtime. `None` indicates not to compress.
    cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
      provided, dataset iteration will be shared across concurrently running
      trainers. See
      https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
      for details.
    target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
      runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
      tf.data service workers. `"AUTO"` works well for most cases, while users
      can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
      data copy if every TF worker colocates with a tf.data service worker.
      Consumers of a shared job must use the same `target_workers`. Defaults to
      `"AUTO"`.

  Returns:
    Dataset: A `Dataset` of the elements produced by the data service.
  """
  _validate_job_name(job_name)
  return _distribute(
      processing_mode=processing_mode,
      service=service,
      job_name=job_name,
      consumer_index=consumer_index,
      num_consumers=num_consumers,
      max_outstanding_requests=max_outstanding_requests,
      data_transfer_protocol=data_transfer_protocol,
      compression=compression,
      cross_trainer_cache=cross_trainer_cache,
      target_workers=target_workers)
782
+
783
+
784
def _register_dataset(
    service, dataset, compression, dataset_id=None) -> tensor.Tensor:
  """Registers a dataset with the tf.data service.

  This transformation is similar to `register_dataset`, but supports additional
  parameters which we do not yet want to add to the public Python API.

  Args:
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally be used to override the default
      protocol to use. If it's a tuple, it should be (protocol, address).
    dataset: A `tf.data.Dataset` to register with the tf.data service.
    compression: How to compress the dataset's elements before transferring them
      over the network. "AUTO" leaves the decision of how to compress up to the
      tf.data service runtime. `None` indicates not to compress.
    dataset_id: (Optional.) By default, tf.data service generates a unique
      (string) ID for each registered dataset. If a `dataset_id` is provided, it
      will use the specified ID. If a dataset with a matching ID already exists,
      no new dataset is registered. This is useful if multiple training jobs
      want to (re)use the same dataset for training. In this case, they can
      register the dataset with the same dataset ID.

  Returns:
    A scalar string tensor representing the dataset ID.
  """
  _validate_compression(compression)

  protocol, address = (
      service if isinstance(service, tuple) else _parse_service(service))

  state_policy = dataset.options().experimental_external_state_policy
  if state_policy is None:
    state_policy = ExternalStatePolicy.WARN

  # The element spec can only be serialized eagerly; in graph mode callers
  # must pass `element_spec` to `from_dataset_id` themselves.
  encoded_spec = None
  if context.executing_eagerly():
    encoded_spec = nested_structure_coder.encode_structure(
        dataset.element_spec).SerializeToString()

  if compression == COMPRESSION_AUTO:
    # Compress elements on the workers so they travel compressed over RPC.
    dataset = dataset.map(
        lambda *x: compression_ops.compress(x),
        num_parallel_calls=dataset_ops.AUTOTUNE)
  dataset = dataset._apply_debug_options()  # pylint: disable=protected-access

  metadata = data_service_pb2.DataServiceMetadata(
      element_spec=encoded_spec,
      compression=_get_compression_proto(compression))

  return gen_experimental_dataset_ops.register_dataset_v2(
      dataset._variant_tensor,  # pylint: disable=protected-access
      address=address,
      protocol=protocol,
      external_state_policy=state_policy.value,
      requested_dataset_id=dataset_id,
      metadata=metadata.SerializeToString())
843
+
844
+
845
@tf_export("data.experimental.service.register_dataset")
def register_dataset(
    service, dataset, compression="AUTO", dataset_id=None) -> tensor.Tensor:
  """Registers a dataset with the tf.data service.

  `register_dataset` registers a dataset with the tf.data service so that
  datasets can be created later with
  `tf.data.experimental.service.from_dataset_id`. This is useful when the
  dataset is registered by one process, then used in another process. When the
  same process is both registering and reading from the dataset, it is simpler
  to use `tf.data.experimental.service.distribute` instead.

  If the dataset is already registered with the tf.data service,
  `register_dataset` returns the already-registered dataset's id.

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> worker = tf.data.experimental.service.WorkerServer(
  ...     tf.data.experimental.service.WorkerConfig(
  ...         dispatcher_address=dispatcher_address))
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset_id = tf.data.experimental.service.register_dataset(
  ...     dispatcher.target, dataset)
  >>> dataset = tf.data.experimental.service.from_dataset_id(
  ...     processing_mode="parallel_epochs",
  ...     service=dispatcher.target,
  ...     dataset_id=dataset_id,
  ...     element_spec=dataset.element_spec)
  >>> print(list(dataset.as_numpy_iterator()))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

  Args:
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally be used to override the default
      protocol to use. If it's a tuple, it should be (protocol, address).
    dataset: A `tf.data.Dataset` to register with the tf.data service.
    compression: (Optional.) How to compress the dataset's elements before
      transferring them over the network. "AUTO" leaves the decision of how to
      compress up to the tf.data service runtime. `None` indicates not to
      compress.
    dataset_id: (Optional.) By default, tf.data service generates a unique
      (string) ID for each registered dataset. If a `dataset_id` is provided, it
      will use the specified ID. If a dataset with a matching ID already exists,
      no new dataset is registered. This is useful if multiple training jobs
      want to (re)use the same dataset for training. In this case, they can
      register the dataset with the same dataset ID.

  Returns:
    A scalar string tensor representing the dataset ID.
  """
  # Public wrapper; the internal helper carries extra non-public parameters.
  return _register_dataset(
      service, dataset, compression=compression, dataset_id=dataset_id)
899
+
900
+
901
+ def _from_dataset_id(processing_mode,
902
+ service,
903
+ dataset_id,
904
+ element_spec,
905
+ job_name=None,
906
+ consumer_index=None,
907
+ num_consumers=None,
908
+ max_outstanding_requests=None,
909
+ task_refresh_interval_hint_ms=None,
910
+ data_transfer_protocol=None,
911
+ cross_trainer_cache=None,
912
+ target_workers="AUTO") -> dataset_ops.Dataset:
913
+ """Creates a dataset which reads data from the tf.data service.
914
+
915
+ This transformation is similar to `from_dataset_id`, but supports additional
916
+ parameters which we do not yet want to add to the public Python API.
917
+
918
+ Args:
919
+ processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying
920
+ how to shard the dataset among tf.data workers. See
921
+ `tf.data.experimental.service.ShardingPolicy` for details. For backwards
922
+ compatibility, `processing_mode` may also be set to the strings
923
+ `"parallel_epochs"` or `"distributed_epoch"`, which are respectively
924
+ equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
925
+ service: A string or a tuple indicating how to connect to the tf.data
926
+ service. If it's a string, it should be in the format
927
+ `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
928
+ address and `<protocol>` can optionally be used to override the default
929
+ protocol to use. If it's a tuple, it should be (protocol, address).
930
+ dataset_id: The id of the dataset to read from. This id is returned by
931
+ `register_dataset` when the dataset is registered with the tf.data
932
+ service.
933
+ element_spec: A nested structure of `tf.TypeSpec`s representing the type of
934
+ elements produced by the dataset. This argument is only required inside a
935
+ tf.function. Use `tf.data.Dataset.element_spec` to get the element spec
936
+ for a given dataset.
937
+ job_name: (Optional.) The name of the job. If provided, it must be a
938
+ non-empty string or tensor. This argument makes it possible for multiple
939
+ datasets to share the same job. The default behavior is that the dataset
940
+ creates anonymous, exclusively owned jobs.
941
+ consumer_index: (Optional.) The index of the consumer in the range from `0`
942
+ to `num_consumers`. Must be specified alongside `num_consumers`. When
943
+ specified, consumers will read from the job in a strict round-robin order,
944
+ instead of the default first-come-first-served order.
945
+ num_consumers: (Optional.) The number of consumers which will consume from
946
+ the job. Must be specified alongside `consumer_index`. When specified,
947
+ consumers will read from the job in a strict round-robin order, instead of
948
+ the default first-come-first-served order. When `num_consumers` is
949
+ specified, the dataset must have infinite cardinality to prevent a
950
+ producer from running out of data early and causing consumers to go out of
951
+ sync.
952
+ max_outstanding_requests: (Optional.) A limit on how many elements may be
953
+ requested at the same time. You can use this option to control the amount
954
+ of memory used, since `distribute` won't use more than `element_size` *
955
+ `max_outstanding_requests` of memory.
956
+ task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
957
+ dispatcher for task changes.
958
+ data_transfer_protocol: (Optional.) The protocol to use for transferring
959
+ data with the tf.data service. By default, data is transferred using gRPC.
960
+ cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
961
+ provided, dataset iteration will be shared across concurrently running
962
+ trainers. See
963
+ https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
964
+ for details.
965
+ target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
966
+ runtime decides which workers to read from. If `"ANY"`, reads from any
967
+ tf.data service workers. If `"LOCAL"`, only reads from local in-processs
968
+ tf.data service workers. `"AUTO"` works well for most cases, while users
969
+ can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
970
+ data copy if every TF worker colocates with a tf.data service worker.
971
+ Consumers of a shared job must use the same `target_workers`. Defaults to
972
+ `"AUTO"`.
973
+
974
+ Returns:
975
+ A `tf.data.Dataset` which reads from the tf.data service.
976
+ """
977
+ def _get_element_spec():
978
+ """Fetches the element spec from the server."""
979
+ data_service_metadata = None
980
+ dataset_id_val = tensor_util.constant_value(dataset_id)
981
+ try:
982
+ data_service_metadata = (
983
+ _pywrap_server_lib.TF_DATA_GetDataServiceMetadataByID(
984
+ dataset_id_val, address, protocol
985
+ )
986
+ )
987
+ except NotImplementedError as err:
988
+ raise ValueError(
989
+ "The tf.data service is running an earlier version of TensorFlow "
990
+ "that requires specifying `element_spec` as an argument to "
991
+ "`from_dataset_id`. Please either supply an element spec or update "
992
+ "the tf.data service to the latest version.") from err
993
+ except RuntimeError:
994
+ # This error results from dataset ID not found. A more appropriate error
995
+ # will be raised when the dataset is created.
996
+ pass
997
+
998
+ if not data_service_metadata or not data_service_metadata.element_spec:
999
+ dataset_id_val = tensor_util.constant_value(dataset_id)
1000
+ raise ValueError(
1001
+ f"Failed to fetch element spec for dataset id {dataset_id_val} from "
1002
+ "tf.data service. If the dataset was registered in graph mode or "
1003
+ "inside a tf.function, the `element_spec` must be specified as an "
1004
+ "argument to `from_dataset_id`.")
1005
+
1006
+ struct_pb = nested_structure_coder.struct_pb2.StructuredValue()
1007
+ struct_pb.ParseFromString(data_service_metadata.element_spec)
1008
+ return nested_structure_coder.decode_proto(struct_pb)
1009
+
1010
+ processing_mode = _get_validated_sharding_policy(processing_mode)
1011
+ if isinstance(service, tuple):
1012
+ protocol, address = service
1013
+ else:
1014
+ protocol, address = _parse_service(service)
1015
+ if job_name is not None:
1016
+ if not isinstance(job_name, str) and not isinstance(
1017
+ job_name, tensor.Tensor):
1018
+ raise ValueError(
1019
+ "`job_name` must be a string or Tensor, but `job_name` was of type "
1020
+ f"{type(job_name)}. job_name={job_name}.")
1021
+
1022
+ if not element_spec:
1023
+ if not context.executing_eagerly():
1024
+ raise ValueError(
1025
+ "In graph mode `element_spec` must be provided manually.")
1026
+ element_spec = _get_element_spec()
1027
+
1028
+ dataset = _DataServiceDataset(
1029
+ dataset_id=dataset_id,
1030
+ processing_mode=processing_mode,
1031
+ address=address,
1032
+ element_spec=element_spec,
1033
+ protocol=protocol,
1034
+ data_transfer_protocol=data_transfer_protocol,
1035
+ job_name=job_name,
1036
+ consumer_index=consumer_index,
1037
+ num_consumers=num_consumers,
1038
+ max_outstanding_requests=max_outstanding_requests,
1039
+ task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
1040
+ cross_trainer_cache=cross_trainer_cache,
1041
+ target_workers=target_workers)
1042
+
1043
+ # Disable autosharding for shared jobs.
1044
+ if job_name is not None:
1045
+ options = options_lib.Options()
1046
+ options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
1047
+ dataset = dataset.with_options(options)
1048
+ return dataset
1049
+
1050
+
1051
@tf_export("data.experimental.service.from_dataset_id")
def from_dataset_id(processing_mode,
                    service,
                    dataset_id,
                    element_spec=None,
                    job_name=None,
                    consumer_index=None,
                    num_consumers=None,
                    max_outstanding_requests=None,
                    data_transfer_protocol=None,
                    cross_trainer_cache=None,
                    target_workers="AUTO") -> dataset_ops.Dataset:
  """Creates a dataset which reads data from the tf.data service.

  This is useful when a dataset is registered by one process and consumed by
  another. When a single process both registers and reads, it is simpler to
  use `tf.data.experimental.service.distribute`, which combines
  `register_dataset` and `from_dataset_id` into one dataset transformation.

  Before calling `from_dataset_id`, the dataset must have been registered with
  the tf.data service via `tf.data.experimental.service.register_dataset`,
  which returns the `dataset_id` to pass here.

  The `element_spec` argument describes the `tf.TypeSpec`s of the elements
  produced by the dataset and must match the dataset registered under
  `dataset_id`. When executing eagerly it can be omitted, in which case it is
  fetched from the tf.data service.

  See the documentation for `tf.data.experimental.service.distribute` for more
  detail about how `from_dataset_id` works.

  >>> dispatcher = tf.data.experimental.service.DispatchServer()
  >>> dispatcher_address = dispatcher.target.split("://")[1]
  >>> worker = tf.data.experimental.service.WorkerServer(
  ...     tf.data.experimental.service.WorkerConfig(
  ...         dispatcher_address=dispatcher_address))
  >>> dataset = tf.data.Dataset.range(10)
  >>> dataset_id = tf.data.experimental.service.register_dataset(
  ...     dispatcher.target, dataset)
  >>> dataset = tf.data.experimental.service.from_dataset_id(
  ...     processing_mode="parallel_epochs",
  ...     service=dispatcher.target,
  ...     dataset_id=dataset_id,
  ...     element_spec=dataset.element_spec)
  >>> print(list(dataset.as_numpy_iterator()))
  [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

  Args:
    processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying
      how to shard the dataset among tf.data workers. See
      `tf.data.experimental.service.ShardingPolicy` for details. For backwards
      compatibility, the strings `"parallel_epochs"` and `"distributed_epoch"`
      are also accepted and map to `ShardingPolicy.OFF` and
      `ShardingPolicy.DYNAMIC` respectively.
    service: A string or a tuple indicating how to connect to the tf.data
      service. If it's a string, it should be in the format
      `[<protocol>://]<address>`, where `<address>` identifies the dispatcher
      address and `<protocol>` can optionally override the default protocol.
      If it's a tuple, it should be (protocol, address).
    dataset_id: The id of the dataset to read from, as returned by
      `register_dataset` when the dataset was registered with the tf.data
      service.
    element_spec: A nested structure of `tf.TypeSpec`s representing the type of
      elements produced by the dataset. This argument is only required inside a
      tf.function. Use `tf.data.Dataset.element_spec` to get the element spec
      for a given dataset.
    job_name: (Optional.) The name of the job. If provided, it must be a
      non-empty string. This makes it possible for multiple datasets to share
      the same job. By default the dataset creates an anonymous, exclusively
      owned job.
    consumer_index: (Optional.) The index of the consumer in the range from `0`
      to `num_consumers`. Must be specified alongside `num_consumers`. When
      specified, consumers read from the job in a strict round-robin order
      instead of the default first-come-first-served order.
    num_consumers: (Optional.) The number of consumers which will consume from
      the job. Must be specified alongside `consumer_index`. When specified,
      the dataset must have infinite cardinality to prevent a producer from
      running out of data early and causing consumers to go out of sync.
    max_outstanding_requests: (Optional.) A limit on how many elements may be
      requested at the same time. Use this to bound memory, since at most
      `element_size` * `max_outstanding_requests` of memory will be used.
    data_transfer_protocol: (Optional.) The protocol to use for transferring
      data with the tf.data service. By default, data is transferred using
      gRPC.
    cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is
      provided, dataset iteration will be shared across concurrently running
      trainers. See
      https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers
      for details.
    target_workers: (Optional.) Which workers to read from. If `"AUTO"`, the
      tf.data runtime decides which workers to read from. If `"ANY"`, reads
      from any tf.data service workers. If `"LOCAL"`, only reads from local
      in-process tf.data service workers. `"AUTO"` works well for most cases;
      `"LOCAL"` helps avoid RPCs and data copy if every TF worker colocates
      with a tf.data service worker. Consumers of a shared job must use the
      same `target_workers`. Defaults to `"AUTO"`.

  Returns:
    A `tf.data.Dataset` which reads from the tf.data service.
  """
  _validate_job_name(job_name)
  if job_name is not None:
    # Scope the shared job name under its dataset id so that identical job
    # names used with different registered datasets do not collide.
    job_name = string_ops.string_join(
        ["dataset_id=", _to_string(dataset_id), job_name], "/")

  return _from_dataset_id(
      processing_mode=processing_mode,
      service=service,
      dataset_id=dataset_id,
      element_spec=element_spec,
      job_name=job_name,
      consumer_index=consumer_index,
      num_consumers=num_consumers,
      max_outstanding_requests=max_outstanding_requests,
      data_transfer_protocol=data_transfer_protocol,
      cross_trainer_cache=cross_trainer_cache,
      target_workers=target_workers)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/error_ops.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Ignore_errors dataset transformations."""
16
+ from tensorflow.python.util import deprecation
17
+ from tensorflow.python.util.tf_export import tf_export
18
+
19
+
20
@tf_export("data.experimental.ignore_errors")
@deprecation.deprecated(None, "Use `tf.data.Dataset.ignore_errors` instead.")
def ignore_errors(log_warning=False):
  """Creates a `Dataset` from another `Dataset` and silently ignores any errors.

  The resulting dataset contains the same elements as the input, except that
  any element whose computation raises an error is silently dropped. For
  example:

  ```python
  dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])

  # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an
  InvalidArgumentError.
  dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, "error"))

  # Using `ignore_errors()` will drop the element that causes an error.
  dataset =
  dataset.apply(tf.data.experimental.ignore_errors()) # ==> {1., 0.5, 0.2}
  ```
  Args:
    log_warning: (Optional.) A 'tf.bool' scalar indicating whether ignored
      errors should be logged to stderr. Defaults to 'False'.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # Delegate to the core `tf.data.Dataset.ignore_errors` implementation.
    return dataset.ignore_errors(log_warning)

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/get_single_element.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python wrappers for Datasets and Iterators."""
16
+ from tensorflow.python.types import data as data_types
17
+ from tensorflow.python.util import deprecation
18
+ from tensorflow.python.util.tf_export import tf_export
19
+
20
+
21
@deprecation.deprecated(None, "Use `tf.data.Dataset.get_single_element()`.")
@tf_export("data.experimental.get_single_element")
def get_single_element(dataset):
  """Returns the single element of the `dataset` as a nested structure of tensors.

  This lets you use a `tf.data.Dataset` in a stateless "tensor-in tensor-out"
  expression, without creating an iterator, so tensor transformations can be
  expressed on top of the optimized `tf.data.Dataset` abstraction.

  For example, consider a `preprocessing_fn` that maps a raw feature to a
  processed feature:

  ```python
  def preprocessing_fn(raw_feature):
    # ... the raw_feature is preprocessed as per the use-case
    return feature

  raw_features = ...  # input batch of BATCH_SIZE elements.
  dataset = (tf.data.Dataset.from_tensor_slices(raw_features)
             .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
             .batch(BATCH_SIZE))

  processed_features = tf.data.experimental.get_single_element(dataset)
  ```

  Here the `raw_features` tensor of length=BATCH_SIZE is converted to a
  `tf.data.Dataset`, each `raw_feature` is mapped through `preprocessing_fn`,
  and the processed features are grouped into a single batch. The final
  `dataset` contains exactly one element: the batch of processed features.
  Instead of creating an iterator and pulling that one batch out,
  `get_single_element()` returns it directly.

  NOTE: The `dataset` should contain only one element.

  This is particularly useful when your tensor transformations are expressed
  as `tf.data.Dataset` operations and you want to use those transformations
  while serving your model:

  ```python

  model = ... # A pre-built or custom model

  class PreprocessingModel(tf.keras.Model):
    def __init__(self, model):
      super().__init__(self)
      self.model = model

    @tf.function(input_signature=[...])
    def serving_fn(self, data):
      ds = tf.data.Dataset.from_tensor_slices(data)
      ds = ds.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
      ds = ds.batch(batch_size=BATCH_SIZE)
      return tf.argmax(
          self.model(tf.data.experimental.get_single_element(ds)),
          axis=-1
      )

  preprocessing_model = PreprocessingModel(model)
  your_exported_model_dir = ... # save the model to this path.
  tf.saved_model.save(preprocessing_model, your_exported_model_dir,
                signatures={'serving_default': preprocessing_model.serving_fn})
  ```

  Args:
    dataset: A `tf.data.Dataset` object containing a single element.

  Returns:
    A nested structure of `tf.Tensor` objects, corresponding to the single
    element of `dataset`.

  Raises:
    TypeError: if `dataset` is not a `tf.data.Dataset` object.
    InvalidArgumentError: (at runtime) if `dataset` does not contain exactly
      one element.
  """
  if isinstance(dataset, data_types.DatasetV2):
    return dataset.get_single_element()
  raise TypeError(
      f"Invalid `dataset`. Expected a `tf.data.Dataset` object "
      f"but got {type(dataset)}.")
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/grouping.py ADDED
@@ -0,0 +1,428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Grouping dataset transformations."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.data.ops import structured_function
18
+ from tensorflow.python.data.util import nest
19
+ from tensorflow.python.data.util import structure
20
+ from tensorflow.python.framework import dtypes
21
+ from tensorflow.python.framework import ops
22
+ from tensorflow.python.framework import tensor_spec
23
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
24
+ from tensorflow.python.util import deprecation
25
+ from tensorflow.python.util.tf_export import tf_export
26
+
27
+
28
+ @tf_export("data.experimental.group_by_reducer")
29
+ def group_by_reducer(key_func, reducer):
30
+ """A transformation that groups elements and performs a reduction.
31
+
32
+ This transformation maps element of a dataset to a key using `key_func` and
33
+ groups the elements by key. The `reducer` is used to process each group; its
34
+ `init_func` is used to initialize state for each group when it is created, the
35
+ `reduce_func` is used to update the state every time an element is mapped to
36
+ the matching group, and the `finalize_func` is used to map the final state to
37
+ an output value.
38
+
39
+ Args:
40
+ key_func: A function mapping a nested structure of tensors
41
+ (having shapes and types defined by `self.output_shapes` and
42
+ `self.output_types`) to a scalar `tf.int64` tensor.
43
+ reducer: An instance of `Reducer`, which captures the reduction logic using
44
+ the `init_func`, `reduce_func`, and `finalize_func` functions.
45
+
46
+ Returns:
47
+ A `Dataset` transformation function, which can be passed to
48
+ `tf.data.Dataset.apply`.
49
+ """
50
+
51
+ def _apply_fn(dataset):
52
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
53
+ return _GroupByReducerDataset(dataset, key_func, reducer)
54
+
55
+ return _apply_fn
56
+
57
+
58
+ @deprecation.deprecated(None, "Use `tf.data.Dataset.group_by_window(...)`.")
59
+ @tf_export("data.experimental.group_by_window")
60
+ def group_by_window(key_func,
61
+ reduce_func,
62
+ window_size=None,
63
+ window_size_func=None):
64
+ """A transformation that groups windows of elements by key and reduces them.
65
+
66
+ This transformation maps each consecutive element in a dataset to a key
67
+ using `key_func` and groups the elements by key. It then applies
68
+ `reduce_func` to at most `window_size_func(key)` elements matching the same
69
+ key. All except the final window for each key will contain
70
+ `window_size_func(key)` elements; the final window may be smaller.
71
+
72
+ You may provide either a constant `window_size` or a window size determined by
73
+ the key through `window_size_func`.
74
+
75
+ Args:
76
+ key_func: A function mapping a nested structure of tensors
77
+ (having shapes and types defined by `self.output_shapes` and
78
+ `self.output_types`) to a scalar `tf.int64` tensor.
79
+ reduce_func: A function mapping a key and a dataset of up to `window_size`
80
+ consecutive elements matching that key to another dataset.
81
+ window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
82
+ consecutive elements matching the same key to combine in a single
83
+ batch, which will be passed to `reduce_func`. Mutually exclusive with
84
+ `window_size_func`.
85
+ window_size_func: A function mapping a key to a `tf.int64` scalar
86
+ `tf.Tensor`, representing the number of consecutive elements matching
87
+ the same key to combine in a single batch, which will be passed to
88
+ `reduce_func`. Mutually exclusive with `window_size`.
89
+
90
+ Returns:
91
+ A `Dataset` transformation function, which can be passed to
92
+ `tf.data.Dataset.apply`.
93
+
94
+ Raises:
95
+ ValueError: if neither or both of {`window_size`, `window_size_func`} are
96
+ passed.
97
+ """
98
+
99
+ def _apply_fn(dataset):
100
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
101
+ return dataset.group_by_window(
102
+ key_func=key_func,
103
+ reduce_func=reduce_func,
104
+ window_size=window_size,
105
+ window_size_func=window_size_func)
106
+
107
+ return _apply_fn
108
+
109
+
110
+ @deprecation.deprecated(None,
111
+ "Use `tf.data.Dataset.bucket_by_sequence_length(...)`.")
112
+ @tf_export("data.experimental.bucket_by_sequence_length")
113
+ def bucket_by_sequence_length(element_length_func,
114
+ bucket_boundaries,
115
+ bucket_batch_sizes,
116
+ padded_shapes=None,
117
+ padding_values=None,
118
+ pad_to_bucket_boundary=False,
119
+ no_padding=False,
120
+ drop_remainder=False):
121
+ """A transformation that buckets elements in a `Dataset` by length.
122
+
123
+ Elements of the `Dataset` are grouped together by length and then are padded
124
+ and batched.
125
+
126
+ This is useful for sequence tasks in which the elements have variable length.
127
+ Grouping together elements that have similar lengths reduces the total
128
+ fraction of padding in a batch which increases training step efficiency.
129
+
130
+ Below is an example to bucketize the input data to the 3 buckets
131
+ "[0, 3), [3, 5), [5, inf)" based on sequence length, with batch size 2.
132
+
133
+ >>> elements = [
134
+ ... [0], [1, 2, 3, 4], [5, 6, 7],
135
+ ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
136
+
137
+ >>> dataset = tf.data.Dataset.from_generator(
138
+ ... lambda: elements, tf.int64, output_shapes=[None])
139
+
140
+ >>> dataset = dataset.apply(
141
+ ... tf.data.experimental.bucket_by_sequence_length(
142
+ ... element_length_func=lambda elem: tf.shape(elem)[0],
143
+ ... bucket_boundaries=[3, 5],
144
+ ... bucket_batch_sizes=[2, 2, 2]))
145
+
146
+ >>> for elem in dataset.as_numpy_iterator():
147
+ ... print(elem)
148
+ [[1 2 3 4]
149
+ [5 6 7 0]]
150
+ [[ 7 8 9 10 11 0]
151
+ [13 14 15 16 19 20]]
152
+ [[ 0 0]
153
+ [21 22]]
154
+
155
+ There is also a possibility to pad the dataset till the bucket boundary.
156
+ You can also provide which value to be used while padding the data.
157
+ Below example uses `-1` as padding and it also shows the input data
158
+ being bucketizied to two buckets "[0,3], [4,6]".
159
+
160
+ >>> elements = [
161
+ ... [0], [1, 2, 3, 4], [5, 6, 7],
162
+ ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
163
+
164
+ >>> dataset = tf.data.Dataset.from_generator(
165
+ ... lambda: elements, tf.int32, output_shapes=[None])
166
+
167
+ >>> dataset = dataset.apply(
168
+ ... tf.data.experimental.bucket_by_sequence_length(
169
+ ... element_length_func=lambda elem: tf.shape(elem)[0],
170
+ ... bucket_boundaries=[4, 7],
171
+ ... bucket_batch_sizes=[2, 2, 2],
172
+ ... pad_to_bucket_boundary=True,
173
+ ... padding_values=-1))
174
+
175
+ >>> for elem in dataset.as_numpy_iterator():
176
+ ... print(elem)
177
+ [[ 0 -1 -1]
178
+ [ 5 6 7]]
179
+ [[ 1 2 3 4 -1 -1]
180
+ [ 7 8 9 10 11 -1]]
181
+ [[21 22 -1]]
182
+ [[13 14 15 16 19 20]]
183
+
184
+ When using `pad_to_bucket_boundary` option, it can be seen that it is
185
+ not always possible to maintain the bucket batch size.
186
+ You can drop the batches that do not maintain the bucket batch size by
187
+ using the option `drop_remainder`. Using the same input data as in the
188
+ above example you get the following result.
189
+
190
+ >>> elements = [
191
+ ... [0], [1, 2, 3, 4], [5, 6, 7],
192
+ ... [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
193
+
194
+ >>> dataset = tf.data.Dataset.from_generator(
195
+ ... lambda: elements, tf.int32, output_shapes=[None])
196
+
197
+ >>> dataset = dataset.apply(
198
+ ... tf.data.experimental.bucket_by_sequence_length(
199
+ ... element_length_func=lambda elem: tf.shape(elem)[0],
200
+ ... bucket_boundaries=[4, 7],
201
+ ... bucket_batch_sizes=[2, 2, 2],
202
+ ... pad_to_bucket_boundary=True,
203
+ ... padding_values=-1,
204
+ ... drop_remainder=True))
205
+
206
+ >>> for elem in dataset.as_numpy_iterator():
207
+ ... print(elem)
208
+ [[ 0 -1 -1]
209
+ [ 5 6 7]]
210
+ [[ 1 2 3 4 -1 -1]
211
+ [ 7 8 9 10 11 -1]]
212
+
213
+ Args:
214
+ element_length_func: function from element in `Dataset` to `tf.int32`,
215
+ determines the length of the element, which will determine the bucket it
216
+ goes into.
217
+ bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
218
+ bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
219
+ `len(bucket_boundaries) + 1`.
220
+ padded_shapes: Nested structure of `tf.TensorShape` to pass to
221
+ `tf.data.Dataset.padded_batch`. If not provided, will use
222
+ `dataset.output_shapes`, which will result in variable length dimensions
223
+ being padded out to the maximum length in each batch.
224
+ padding_values: Values to pad with, passed to
225
+ `tf.data.Dataset.padded_batch`. Defaults to padding with 0.
226
+ pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
227
+ size to maximum length in batch. If `True`, will pad dimensions with
228
+ unknown size to bucket boundary minus 1 (i.e., the maximum length in each
229
+ bucket), and caller must ensure that the source `Dataset` does not contain
230
+ any elements with length longer than `max(bucket_boundaries)`.
231
+ no_padding: `bool`, indicates whether to pad the batch features (features
232
+ need to be either of type `tf.sparse.SparseTensor` or of same shape).
233
+ drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
234
+ whether the last batch should be dropped in the case it has fewer than
235
+ `batch_size` elements; the default behavior is not to drop the smaller
236
+ batch.
237
+
238
+ Returns:
239
+ A `Dataset` transformation function, which can be passed to
240
+ `tf.data.Dataset.apply`.
241
+
242
+ Raises:
243
+ ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
244
+ """
245
+
246
+ def _apply_fn(dataset):
247
+ return dataset.bucket_by_sequence_length(
248
+ element_length_func=element_length_func,
249
+ bucket_boundaries=bucket_boundaries,
250
+ bucket_batch_sizes=bucket_batch_sizes,
251
+ padded_shapes=padded_shapes,
252
+ padding_values=padding_values,
253
+ pad_to_bucket_boundary=pad_to_bucket_boundary,
254
+ no_padding=no_padding,
255
+ drop_remainder=drop_remainder)
256
+
257
+ return _apply_fn
258
+
259
+
260
+ class _GroupByReducerDataset(dataset_ops.UnaryDataset):
261
+ """A `Dataset` that groups its input and performs a reduction."""
262
+
263
+ def __init__(self, input_dataset, key_func, reducer):
264
+ """See `group_by_reducer()` for details."""
265
+ self._input_dataset = input_dataset
266
+ self._make_key_func(key_func, input_dataset)
267
+ self._make_init_func(reducer.init_func)
268
+ self._make_reduce_func(reducer.reduce_func, input_dataset)
269
+ self._make_finalize_func(reducer.finalize_func)
270
+ variant_tensor = ged_ops.experimental_group_by_reducer_dataset(
271
+ self._input_dataset._variant_tensor, # pylint: disable=protected-access
272
+ self._key_func.function.captured_inputs,
273
+ self._init_func.function.captured_inputs,
274
+ self._reduce_func.function.captured_inputs,
275
+ self._finalize_func.function.captured_inputs,
276
+ key_func=self._key_func.function,
277
+ init_func=self._init_func.function,
278
+ reduce_func=self._reduce_func.function,
279
+ finalize_func=self._finalize_func.function,
280
+ **self._flat_structure)
281
+ super(_GroupByReducerDataset, self).__init__(input_dataset, variant_tensor)
282
+
283
+ def _make_key_func(self, key_func, input_dataset):
284
+ """Make wrapping defun for key_func."""
285
+ self._key_func = structured_function.StructuredFunctionWrapper(
286
+ key_func, self._transformation_name(), dataset=input_dataset)
287
+ if not self._key_func.output_structure.is_compatible_with(
288
+ tensor_spec.TensorSpec([], dtypes.int64)):
289
+ raise ValueError(
290
+ f"Invalid `key_func`. Expected `key_func` to return a scalar "
291
+ f"tf.int64 tensor, but instead `key_func` has output "
292
+ f"types={self._key_func.output_types} "
293
+ f"and shapes={self._key_func.output_shapes}."
294
+ )
295
+
296
+ def _make_init_func(self, init_func):
297
+ """Make wrapping defun for init_func."""
298
+ self._init_func = structured_function.StructuredFunctionWrapper(
299
+ init_func,
300
+ self._transformation_name(),
301
+ input_structure=tensor_spec.TensorSpec([], dtypes.int64))
302
+
303
+ def _make_reduce_func(self, reduce_func, input_dataset):
304
+ """Make wrapping defun for reduce_func."""
305
+
306
+ # Iteratively rerun the reduce function until reaching a fixed point on
307
+ # `self._state_structure`.
308
+ self._state_structure = self._init_func.output_structure
309
+ state_types = self._init_func.output_types
310
+ state_shapes = self._init_func.output_shapes
311
+ state_classes = self._init_func.output_classes
312
+ need_to_rerun = True
313
+ while need_to_rerun:
314
+
315
+ wrapped_func = structured_function.StructuredFunctionWrapper(
316
+ reduce_func,
317
+ self._transformation_name(),
318
+ input_structure=(self._state_structure, input_dataset.element_spec),
319
+ add_to_graph=False)
320
+
321
+ # Extract and validate class information from the returned values.
322
+ for new_state_class, state_class in zip(
323
+ nest.flatten(wrapped_func.output_classes),
324
+ nest.flatten(state_classes)):
325
+ if not issubclass(new_state_class, state_class):
326
+ raise TypeError(
327
+ f"Invalid `reducer`. The output class of the "
328
+ f"`reducer.reduce_func` {wrapped_func.output_classes}, "
329
+ f"does not match the class of the reduce state "
330
+ f"{self._state_classes}.")
331
+
332
+ # Extract and validate type information from the returned values.
333
+ for new_state_type, state_type in zip(
334
+ nest.flatten(wrapped_func.output_types), nest.flatten(state_types)):
335
+ if new_state_type != state_type:
336
+ raise TypeError(
337
+ f"Invalid `reducer`. The element types for the new state "
338
+ f"{wrapped_func.output_types} do not match the element types "
339
+ f"of the old state {self._init_func.output_types}."
340
+ )
341
+
342
+ # Extract shape information from the returned values.
343
+ flat_state_shapes = nest.flatten(state_shapes)
344
+ flat_new_state_shapes = nest.flatten(wrapped_func.output_shapes)
345
+ weakened_state_shapes = [
346
+ original.most_specific_compatible_shape(new)
347
+ for original, new in zip(flat_state_shapes, flat_new_state_shapes)
348
+ ]
349
+
350
+ need_to_rerun = False
351
+ for original_shape, weakened_shape in zip(flat_state_shapes,
352
+ weakened_state_shapes):
353
+ if original_shape.ndims is not None and (
354
+ weakened_shape.ndims is None or
355
+ original_shape.as_list() != weakened_shape.as_list()):
356
+ need_to_rerun = True
357
+ break
358
+
359
+ if need_to_rerun:
360
+ state_shapes = nest.pack_sequence_as(
361
+ self._init_func.output_shapes, weakened_state_shapes)
362
+ self._state_structure = structure.convert_legacy_structure(
363
+ state_types, state_shapes, state_classes)
364
+
365
+ self._reduce_func = wrapped_func
366
+ self._reduce_func.function.add_to_graph(ops.get_default_graph())
367
+
368
+ def _make_finalize_func(self, finalize_func):
369
+ """Make wrapping defun for finalize_func."""
370
+ self._finalize_func = structured_function.StructuredFunctionWrapper(
371
+ finalize_func,
372
+ self._transformation_name(),
373
+ input_structure=self._state_structure)
374
+
375
+ @property
376
+ def element_spec(self):
377
+ return self._finalize_func.output_structure
378
+
379
+ def _functions(self):
380
+ return [
381
+ self._key_func, self._init_func, self._reduce_func, self._finalize_func
382
+ ]
383
+
384
+ def _transformation_name(self):
385
+ return "tf.data.experimental.group_by_reducer()"
386
+
387
+
388
+ @tf_export("data.experimental.Reducer")
389
+ class Reducer:
390
+ """A reducer is used for reducing a set of elements.
391
+
392
+ A reducer is represented as a tuple of the three functions:
393
+ - init_func - to define initial value: key => initial state
394
+ - reducer_func - operation to perform on values with same key: (old state, input) => new state
395
+ - finalize_func - value to return in the end: state => result
396
+
397
+ For example,
398
+
399
+ ```
400
+ def init_func(_):
401
+ return (0.0, 0.0)
402
+
403
+ def reduce_func(state, value):
404
+ return (state[0] + value['features'], state[1] + 1)
405
+
406
+ def finalize_func(s, n):
407
+ return s / n
408
+
409
+ reducer = tf.data.experimental.Reducer(init_func, reduce_func, finalize_func)
410
+ ```
411
+ """
412
+
413
+ def __init__(self, init_func, reduce_func, finalize_func):
414
+ self._init_func = init_func
415
+ self._reduce_func = reduce_func
416
+ self._finalize_func = finalize_func
417
+
418
+ @property
419
+ def init_func(self):
420
+ return self._init_func
421
+
422
+ @property
423
+ def reduce_func(self):
424
+ return self._reduce_func
425
+
426
+ @property
427
+ def finalize_func(self):
428
+ return self._finalize_func
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/lookup_ops.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #==============================================================================
15
+ """Lookup operations."""
16
+
17
+ from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor
21
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
22
+ from tensorflow.python.ops import lookup_ops
23
+ from tensorflow.python.ops import math_ops
24
+ from tensorflow.python.util.tf_export import tf_export
25
+
26
+
27
+ def _check_table_initializer_element_spec(element_spec):
28
+ """Raises an error if the given table initializer element spec is invalid."""
29
+ base_error = ("Datasets used to initialize lookup tables must "
30
+ "produce elements in the form (key, value), where "
31
+ "the keys and values are scalar tensors. ")
32
+ specific_error = None
33
+ if len(element_spec) != 2:
34
+ raise ValueError(base_error + "However, the given dataset produces "
35
+ f"{len(element_spec)} components instead of two "
36
+ "(key, value) components. Full dataset element spec: "
37
+ f"{element_spec}.")
38
+ if not isinstance(element_spec[0], tensor.TensorSpec):
39
+ raise ValueError(base_error + "However, the given dataset produces "
40
+ f"non-Tensor keys of type {type(element_spec[0])}.")
41
+ if not isinstance(element_spec[1], tensor.TensorSpec):
42
+ raise ValueError(base_error + "However, the given dataset produces "
43
+ f"non-Tensor values of type {type(element_spec[1])}.")
44
+ if element_spec[0].shape.rank not in (None, 0):
45
+ raise ValueError(
46
+ base_error + "However, the given dataset produces "
47
+ f"non-scalar key Tensors of rank {element_spec[0].shape.rank}.")
48
+ if element_spec[1].shape.rank not in (None, 0):
49
+ raise ValueError(
50
+ base_error + "However, the given dataset produces "
51
+ f"non-scalar value Tensors of rank {element_spec[1].shape.rank}.")
52
+
53
+
54
+ @tf_export("data.experimental.DatasetInitializer")
55
+ class DatasetInitializer(lookup_ops.TableInitializerBase):
56
+ """Creates a table initializer from a `tf.data.Dataset`.
57
+
58
+ Sample usage:
59
+
60
+ >>> keys = tf.data.Dataset.range(100)
61
+ >>> values = tf.data.Dataset.range(100).map(
62
+ ... lambda x: tf.strings.as_string(x * 2))
63
+ >>> ds = tf.data.Dataset.zip((keys, values))
64
+ >>> init = tf.data.experimental.DatasetInitializer(ds)
65
+ >>> table = tf.lookup.StaticHashTable(init, "")
66
+ >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy()
67
+ array([b'0', b'2', b'4'], dtype=object)
68
+
69
+ Attributes:
70
+ dataset: A `tf.data.Dataset` object that produces tuples of scalars. The
71
+ first scalar is treated as a key and the second as value.
72
+ Raises: ValueError if `dataset` doesn't conform to specifications.
73
+ """
74
+
75
+ def __init__(self, dataset):
76
+ """Creates a table initializer from a `tf.data.Dataset`.
77
+
78
+ Args:
79
+ dataset: A `tf.data.Dataset` object that produces tuples of scalars. The
80
+ first scalar is treated as a key and the second as value.
81
+ Raises: ValueError if `dataset` doesn't conform to specifications.
82
+ Returns: A `DatasetInitializer` object
83
+ """
84
+ # Assert that the dataset element spec is a tuple of TensorSpecs where
85
+ # each tensor is a scalar.
86
+ self.dataset = dataset
87
+ elem_spec = self.dataset.element_spec
88
+ _check_table_initializer_element_spec(elem_spec)
89
+
90
+ key_type = elem_spec[0].dtype
91
+ value_type = elem_spec[1].dtype
92
+ super(DatasetInitializer, self).__init__(key_type, value_type)
93
+
94
+ def initialize(self, table):
95
+ lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype)
96
+ init_op = ged_ops.initialize_table_from_dataset(
97
+ table.resource_handle, self.dataset._variant_tensor) # pylint: disable=protected-access
98
+ ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
99
+ return init_op
100
+
101
+
102
+ @tf_export("data.experimental.table_from_dataset")
103
+ def table_from_dataset(dataset=None,
104
+ num_oov_buckets=0,
105
+ vocab_size=None,
106
+ default_value=None,
107
+ hasher_spec=lookup_ops.FastHashSpec,
108
+ key_dtype=dtypes.string,
109
+ name=None):
110
+ """Returns a lookup table based on the given dataset.
111
+
112
+ This operation constructs a lookup table based on the given dataset of pairs
113
+ of (key, value).
114
+
115
+ Any lookup of an out-of-vocabulary token will return a bucket ID based on its
116
+ hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
117
+ `default_value`.
118
+ The bucket ID range is
119
+ `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.
120
+
121
+ Sample Usages:
122
+
123
+ >>> keys = tf.data.Dataset.range(100)
124
+ >>> values = tf.data.Dataset.range(100).map(
125
+ ... lambda x: tf.strings.as_string(x * 2))
126
+ >>> ds = tf.data.Dataset.zip((keys, values))
127
+ >>> table = tf.data.experimental.table_from_dataset(
128
+ ... ds, default_value='n/a', key_dtype=tf.int64)
129
+ >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy()
130
+ array([b'0', b'2', b'4'], dtype=object)
131
+
132
+ Args:
133
+ dataset: A dataset containing (key, value) pairs.
134
+ num_oov_buckets: The number of out-of-vocabulary buckets.
135
+ vocab_size: Number of the elements in the vocabulary, if known.
136
+ default_value: The value to use for out-of-vocabulary feature values.
137
+ Defaults to -1.
138
+ hasher_spec: A `HasherSpec` to specify the hash function to use for
139
+ assignation of out-of-vocabulary buckets.
140
+ key_dtype: The `key` data type.
141
+ name: A name for this op (optional).
142
+
143
+ Returns:
144
+ The lookup table based on the given dataset.
145
+
146
+ Raises:
147
+ ValueError: If
148
+ * `dataset` does not contain pairs
149
+ * The 2nd item in the `dataset` pairs has a dtype which is incompatible
150
+ with `default_value`
151
+ * `num_oov_buckets` is negative
152
+ * `vocab_size` is not greater than zero
153
+ * The `key_dtype` is not integer or string
154
+ """
155
+ elem_spec = dataset.element_spec
156
+ _check_table_initializer_element_spec(elem_spec)
157
+ if default_value is None:
158
+ default_value = -1
159
+ if not (elem_spec[1].dtype.is_integer or elem_spec[1].dtype.is_floating):
160
+ raise ValueError("`default_value` must be specified when creating a "
161
+ "table from a dataset that produces values of type "
162
+ f"{elem_spec[1].dtype}.")
163
+ if num_oov_buckets < 0:
164
+ raise ValueError("`num_oov_buckets` must be greater than or equal to 0, "
165
+ f"got {num_oov_buckets}.")
166
+ if (not isinstance(vocab_size, tensor.Tensor) and vocab_size is not None and
167
+ vocab_size < 1):
168
+ raise ValueError(f"`vocab_size` must be greater than 0, got {vocab_size}.")
169
+ if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
170
+ raise TypeError("`key_dtype` must be either an integer or string type, "
171
+ f"but got {key_dtype}")
172
+ if vocab_size is not None:
173
+ if isinstance(vocab_size, tensor.Tensor):
174
+ vocab_size = math_ops.cast(vocab_size, dtypes.int64)
175
+ dataset = dataset.take(vocab_size)
176
+ dataset = dataset.apply(assert_cardinality(vocab_size))
177
+ with ops.name_scope(name, "string_to_index"):
178
+ initializer = DatasetInitializer(dataset)
179
+ with ops.name_scope(None, "hash_table"):
180
+ table = lookup_ops.StaticHashTableV1(initializer, default_value)
181
+ if num_oov_buckets:
182
+ table = lookup_ops.IdTableWithHashBuckets(
183
+ table,
184
+ num_oov_buckets=num_oov_buckets,
185
+ hasher_spec=hasher_spec,
186
+ key_dtype=key_dtype)
187
+ return table
188
+
189
+
190
+ @tf_export("data.experimental.index_table_from_dataset")
191
+ def index_table_from_dataset(dataset=None,
192
+ num_oov_buckets=0,
193
+ vocab_size=None,
194
+ default_value=-1,
195
+ hasher_spec=lookup_ops.FastHashSpec,
196
+ key_dtype=dtypes.string,
197
+ name=None):
198
+ """Returns an index lookup table based on the given dataset.
199
+
200
+ This operation constructs a lookup table based on the given dataset of keys.
201
+
202
+ Any lookup of an out-of-vocabulary token will return a bucket ID based on its
203
+ hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
204
+ `default_value`.
205
+ The bucket ID range is
206
+ `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.
207
+
208
+ Sample Usages:
209
+
210
+ >>> ds = tf.data.Dataset.range(100).map(lambda x: tf.strings.as_string(x * 2))
211
+ >>> table = tf.data.experimental.index_table_from_dataset(
212
+ ... ds, key_dtype=dtypes.int64)
213
+ >>> table.lookup(tf.constant(['0', '2', '4'], dtype=tf.string)).numpy()
214
+ array([0, 1, 2])
215
+
216
+ Args:
217
+ dataset: A dataset of keys.
218
+ num_oov_buckets: The number of out-of-vocabulary buckets.
219
+ vocab_size: Number of the elements in the vocabulary, if known.
220
+ default_value: The value to use for out-of-vocabulary feature values.
221
+ Defaults to -1.
222
+ hasher_spec: A `HasherSpec` to specify the hash function to use for
223
+ assignation of out-of-vocabulary buckets.
224
+ key_dtype: The `key` data type.
225
+ name: A name for this op (optional).
226
+
227
+ Returns:
228
+ The lookup table based on the given dataset.
229
+
230
+ Raises:
231
+ ValueError: If
232
+ * `num_oov_buckets` is negative
233
+ * `vocab_size` is not greater than zero
234
+ * The `key_dtype` is not integer or string
235
+ """
236
+ return table_from_dataset(dataset.enumerate().map(lambda v, k: (k, v)),
237
+ num_oov_buckets, vocab_size, default_value,
238
+ hasher_spec, key_dtype, name)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/map_defun.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Experimental API for optimizing `tf.data` pipelines."""
16
+
17
+ from tensorflow.python.framework import ops
18
+ from tensorflow.python.framework import tensor_shape
19
+ from tensorflow.python.ops import gen_dataset_ops
20
+
21
+
22
def map_defun(fn,
              elems,
              output_dtypes,
              output_shapes,
              max_intra_op_parallelism=1):
  """Map a function on the list of tensors unpacked from `elems` on dimension 0.

  Args:
    fn: A function (`function.defun`) that takes a list of tensors and returns
      another list of tensors. The output list has the same types as
      output_dtypes. The elements of the output list have the same dimension 0
      as `elems`, and the remaining dimensions correspond to those of
      `fn_output_shapes`.
    elems: A list of tensors.
    output_dtypes: A list of dtypes corresponding to the output types of the
      function.
    output_shapes: A list of `TensorShape`s corresponding to the output shapes
      from each invocation of the function on slices of inputs.
    max_intra_op_parallelism: An integer. If positive, sets the max parallelism
      limit of each function call to this.

  Raises:
    ValueError: if any of the inputs are malformed.

  Returns:
    A list of `Tensor` objects with the same types as `output_dtypes`.
  """
  # Validate argument containers up front so errors name the bad argument.
  if not isinstance(elems, list):
    raise ValueError(f"`elems` must be a list of tensors, but was {elems}.")
  if not isinstance(output_dtypes, list):
    raise ValueError("`output_dtypes` must be a list of `tf.DType` objects, "
                     f"but was {output_dtypes}.")
  if not isinstance(output_shapes, list):
    raise ValueError("`output_shapes` must be a list of `tf.TensorShape` "
                     f"objects, but was {output_shapes}.")

  concrete_fn = fn.get_concrete_function()
  # TODO(shivaniagrawal/rachelim): what about functions created without
  # input_signature.
  elems = [ops.convert_to_tensor(e) for e in elems]
  output_shapes = [tensor_shape.TensorShape(s) for s in output_shapes]
  # The concrete function's captured inputs must be forwarded alongside
  # `elems` so the kernel can invoke the function correctly.
  return gen_dataset_ops.map_defun(elems, concrete_fn.captured_inputs,
                                   output_dtypes, output_shapes, concrete_fn,
                                   max_intra_op_parallelism)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/matching_files.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Experimental API for matching input filenames."""
16
+
17
+ from tensorflow.python.data.ops import dataset_ops
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_spec
21
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
22
+
23
+
24
class MatchingFilesDataset(dataset_ops.DatasetSource):
  """A dataset source yielding the filenames that match the input patterns."""

  def __init__(self, patterns):
    """Creates the dataset from one or more glob `patterns`."""
    patterns_t = ops.convert_to_tensor(
        patterns, dtype=dtypes.string, name="patterns")
    self._patterns = patterns_t
    super(MatchingFilesDataset, self).__init__(
        ged_ops.matching_files_dataset(patterns_t))

  @property
  def element_spec(self):
    # Each element is a single scalar filename string.
    return tensor_spec.TensorSpec([], dtypes.string)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/scan_ops.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Scan dataset transformation."""
16
+ from tensorflow.python.util import deprecation
17
+ from tensorflow.python.util.tf_export import tf_export
18
+
19
+
20
@deprecation.deprecated(None, "Use `tf.data.Dataset.scan(...)` instead.")
@tf_export("data.experimental.scan")
def scan(initial_state, scan_func):
  """A transformation that scans a function across an input dataset.

  This transformation is a stateful relative of `tf.data.Dataset.map`.
  In addition to mapping `scan_func` across the elements of the input dataset,
  `scan()` accumulates one or more state tensors, whose initial values are
  `initial_state`.

  Args:
    initial_state: A nested structure of tensors, representing the initial state
      of the accumulator.
    scan_func: A function that maps `(old_state, input_element)` to
      `(new_state, output_element)`. It must take two arguments and return a
      pair of nested structures of tensors. The `new_state` must match the
      structure of `initial_state`.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  def _apply_fn(dataset):
    # Delegate to the core implementation; this wrapper exists only for
    # backwards compatibility with `tf.data.experimental.scan`.
    return dataset.scan(initial_state=initial_state, scan_func=scan_func)

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/snapshot.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Dataset snapshot and related functionality."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.framework import dtypes
18
+ from tensorflow.python.framework import ops
19
+ from tensorflow.python.framework import random_seed
20
+ from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
21
+ from tensorflow.python.util import deprecation
22
+ from tensorflow.python.util.tf_export import tf_export
23
+
24
+ COMPRESSION_GZIP = "GZIP"
25
+ COMPRESSION_SNAPPY = "SNAPPY"
26
+ COMPRESSION_NONE = None
27
+
28
+
29
class _LegacySnapshotDataset(dataset_ops.UnaryUnchangedStructureDataset):
  """A Dataset that captures a snapshot or reads from a snapshot."""

  def __init__(self,
               input_dataset,
               path,
               compression=None,
               reader_path_prefix=None,
               writer_path_prefix=None,
               shard_size_bytes=None,
               pending_snapshot_expiry_seconds=None,
               num_reader_threads=None,
               reader_buffer_size=None,
               num_writer_threads=None,
               writer_buffer_size=None,
               shuffle_on_read=None,
               shuffle_seed=None,
               mode=None,
               snapshot_name=None):
    # Unset optional arguments are normalized to the sentinel values the
    # SnapshotDataset kernel expects: "" for strings, -1 for integers,
    # False for booleans, and "auto" for the mode.
    self._compression = compression if compression is not None else ""
    self._reader_path_prefix = (
        reader_path_prefix if reader_path_prefix is not None else "")
    self._writer_path_prefix = (
        writer_path_prefix if writer_path_prefix is not None else "")
    self._shard_size_bytes = (
        shard_size_bytes if shard_size_bytes is not None else -1)
    self._pending_snapshot_expiry_seconds = (
        pending_snapshot_expiry_seconds
        if pending_snapshot_expiry_seconds is not None else -1)
    self._num_reader_threads = (
        num_reader_threads if num_reader_threads is not None else -1)
    self._reader_buffer_size = (
        reader_buffer_size if reader_buffer_size is not None else -1)
    self._num_writer_threads = (
        num_writer_threads if num_writer_threads is not None else -1)
    self._writer_buffer_size = (
        writer_buffer_size if writer_buffer_size is not None else -1)
    self._shuffle_on_read = (
        shuffle_on_read if shuffle_on_read is not None else False)
    self._mode = (mode if mode is not None else "auto")
    self._snapshot_name = (snapshot_name if snapshot_name is not None else "")

    # Split the user seed into the (seed, seed2) pair used by tf.data kernels;
    # when `shuffle_seed` is None this derives a non-deterministic seed.
    self._seed, self._seed2 = random_seed.get_seed(shuffle_seed)

    self._input_dataset = input_dataset
    self._path = ops.convert_to_tensor(path, dtype=dtypes.string, name="path")

    variant_tensor = ged_ops.snapshot_dataset(
        self._input_dataset._variant_tensor,  # pylint: disable=protected-access
        path=self._path,
        compression=self._compression,
        reader_path_prefix=self._reader_path_prefix,
        writer_path_prefix=self._writer_path_prefix,
        shard_size_bytes=self._shard_size_bytes,
        pending_snapshot_expiry_seconds=self._pending_snapshot_expiry_seconds,
        num_reader_threads=self._num_reader_threads,
        reader_buffer_size=self._reader_buffer_size,
        num_writer_threads=self._num_writer_threads,
        writer_buffer_size=self._writer_buffer_size,
        shuffle_on_read=self._shuffle_on_read,
        seed=self._seed,
        seed2=self._seed2,
        mode=self._mode,
        snapshot_name=self._snapshot_name,
        **self._flat_structure)

    super(_LegacySnapshotDataset, self).__init__(input_dataset, variant_tensor)
97
+
98
+
99
@deprecation.deprecated(None, "Use `tf.data.Dataset.snapshot(...)` instead.")
def legacy_snapshot(path,
                    compression=None,
                    reader_path_prefix=None,
                    writer_path_prefix=None,
                    shard_size_bytes=None,
                    pending_snapshot_expiry_seconds=None,
                    num_reader_threads=None,
                    reader_buffer_size=None,
                    num_writer_threads=None,
                    writer_buffer_size=None,
                    shuffle_on_read=None,
                    shuffle_seed=None,
                    mode=None,
                    snapshot_name=None):
  """Writes to/reads from a snapshot of a dataset.

  This function attempts to determine whether a valid snapshot exists at the
  `path`, and reads from the snapshot if so. If not, it will run the
  preprocessing pipeline as usual, and write out a snapshot of the data
  processed for future use.

  Args:
    path: A directory where we want to save our snapshots and/or read from a
      previously saved snapshot.
    compression: The type of compression to apply to the Dataset. Currently
      supports "GZIP" or None. Defaults to None (no compression).
    reader_path_prefix: A prefix to add to the path when reading from snapshots.
      Defaults to None.
    writer_path_prefix: A prefix to add to the path when writing to snapshots.
      Defaults to None.
    shard_size_bytes: The size of each shard to be written by the snapshot
      dataset op. Defaults to 10 GiB.
    pending_snapshot_expiry_seconds: How long to wait (in seconds) before the
      snapshot op considers a previously unfinished snapshot to be stale.
    num_reader_threads: Number of threads to parallelize reading from snapshot.
      Especially useful if compression is turned on since the decompression
      operation tends to be intensive. Defaults to 1. If > 1, then this might
      introduce non-determinism i.e. the order in which the elements are read
      from the snapshot are different from the order they're written.
    reader_buffer_size: Maximum number of elements we can prefetch reading from
      the snapshot. Defaults to 1. Increasing this might improve performance but
      will increase memory consumption.
    num_writer_threads: Number of threads to parallelize writing from snapshot.
      We'll open up `num_writer_threads` files and write to them in parallel.
      Especially useful if compression is turned on since the compression
      operation tends to be intensive. Defaults to 1. If > 1, then this might
      introduce non-determinism i.e. the order in which the elements are read
      from the upstream iterator are different from the order they're written.
    writer_buffer_size: Maximum number of pipeline elements to fill up the
      buffer before writing them out using `num_writer_threads`.
    shuffle_on_read: If this is True, then the order in which examples are
      produced when reading from a snapshot will be random. Defaults to False.
    shuffle_seed: Optional. If shuffle_seed is set, the random number generator
      used for shuffling (when shuffle_on_read is turned on) is seeded by the
      given seed. Otherwise, it is seeded by a random seed that differs for
      every run.
    mode: The mode at which snapshot should operate. Valid options are "auto",
      "read", "write", and "passthrough". The default mode is "auto", where the
      snapshot op will automatically determine what mode to operate in.
    snapshot_name: If set, use the supplied string as a named snapshot name
      instead of introspecting the data pipeline and automatically generating a
      unique identifier for the snapshot.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # All arguments are forwarded unchanged; the dataset class normalizes
    # unset (None) values to the kernel's sentinel defaults.
    return _LegacySnapshotDataset(
        input_dataset=dataset,
        path=path,
        compression=compression,
        reader_path_prefix=reader_path_prefix,
        writer_path_prefix=writer_path_prefix,
        shard_size_bytes=shard_size_bytes,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
        num_reader_threads=num_reader_threads,
        reader_buffer_size=reader_buffer_size,
        num_writer_threads=num_writer_threads,
        writer_buffer_size=writer_buffer_size,
        shuffle_on_read=shuffle_on_read,
        shuffle_seed=shuffle_seed,
        mode=mode,
        snapshot_name=snapshot_name)

  return _apply_fn
187
+
188
+
189
@deprecation.deprecated(None, "Use `tf.data.Dataset.snapshot(...)`.")
@tf_export("data.experimental.snapshot")
def snapshot(path, compression="AUTO", reader_func=None, shard_func=None):
  """API to persist the output of the input dataset.

  The snapshot API allows users to transparently persist the output of their
  preprocessing pipeline to disk, and materialize the pre-processed data on a
  different training run.

  This API enables repeated preprocessing steps to be consolidated, and allows
  re-use of already processed data, trading off disk storage and network
  bandwidth for freeing up more valuable CPU resources and accelerator compute
  time.

  https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md
  has detailed design documentation of this feature.

  Users can specify various options to control the behavior of snapshot,
  including how snapshots are read from and written to by passing in
  user-defined functions to the `reader_func` and `shard_func` parameters.

  `shard_func` is a user specified function that maps input elements to snapshot
  shards.

  Users may want to specify this function to control how snapshot files should
  be written to disk. Below is an example of how a potential shard_func could
  be written.

  ```python
  dataset = ...
  dataset = dataset.enumerate()
  dataset = dataset.apply(tf.data.experimental.snapshot("/path/to/snapshot/dir",
      shard_func=lambda x, y: x % NUM_SHARDS, ...))
  dataset = dataset.map(lambda x, y: y)
  ```

  `reader_func` is a user specified function that accepts a single argument:
  (1) a Dataset of Datasets, each representing a "split" of elements of the
  original dataset. The cardinality of the input dataset matches the
  number of the shards specified in the `shard_func` (see above). The function
  should return a Dataset of elements of the original dataset.

  Users may want to specify this function to control how snapshot files should
  be read from disk, including the amount of shuffling and parallelism.

  Here is an example of a standard reader function a user can define. This
  function enables both dataset shuffling and parallel reading of datasets:

  ```python
  def user_reader_func(datasets):
    # shuffle the datasets splits
    datasets = datasets.shuffle(NUM_CORES)
    # read datasets in parallel and interleave their elements
    return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)

  dataset = dataset.apply(tf.data.experimental.snapshot("/path/to/snapshot/dir",
      reader_func=user_reader_func))
  ```

  By default, snapshot parallelizes reads by the number of cores available on
  the system, but will not attempt to shuffle the data.

  Args:
    path: Required. A directory to use for storing / loading the snapshot to /
      from.
    compression: Optional. The type of compression to apply to the snapshot
      written to disk. Supported options are `GZIP`, `SNAPPY`, `AUTO` or None.
      Defaults to AUTO, which attempts to pick an appropriate compression
      algorithm for the dataset.
    reader_func: Optional. A function to control how to read data from snapshot
      shards.
    shard_func: Optional. A function to control how to shard data when writing a
      snapshot.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    """Actual dataset transformation."""
    # Delegate to the core `Dataset.snapshot` implementation; this wrapper
    # exists only for backwards compatibility.
    return dataset.snapshot(
        path=path,
        compression=compression,
        reader_func=reader_func,
        shard_func=shard_func)

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/unique.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Unique element dataset transformations."""
16
+ from tensorflow.python.util import deprecation
17
+ from tensorflow.python.util.tf_export import tf_export
18
+
19
+
20
@deprecation.deprecated(None, "Use `tf.data.Dataset.unique(...)` instead.")
@tf_export("data.experimental.unique")
def unique():
  """Creates a `Dataset` from another `Dataset`, discarding duplicates.

  Use this transformation to produce a dataset that contains one instance of
  each unique element in the input. For example:

  ```python
  dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])

  # Using `unique()` will drop the duplicate elements.
  dataset = dataset.apply(tf.data.experimental.unique())  # ==> { 1, 37, 2 }
  ```

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """

  def _apply_fn(dataset):
    # Delegate to the core implementation; this wrapper exists only for
    # backwards compatibility with `tf.data.experimental.unique`.
    return dataset.unique()

  return _apply_fn
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/ops/writers.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Python wrappers for tf.data writers."""
16
+ from tensorflow.python.data.ops import dataset_ops
17
+ from tensorflow.python.data.util import convert
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import tensor_spec
21
+ from tensorflow.python.ops import gen_experimental_dataset_ops
22
+ from tensorflow.python.types import data as data_types
23
+ from tensorflow.python.util import deprecation
24
+ from tensorflow.python.util.tf_export import tf_export
25
+
26
+
27
@tf_export("data.experimental.TFRecordWriter")
@deprecation.deprecated(
    None, "To write TFRecords to disk, use `tf.io.TFRecordWriter`. To save "
    "and load the contents of a dataset, use `tf.data.experimental.save` "
    "and `tf.data.experimental.load`")
class TFRecordWriter:
  """Writes a dataset to a TFRecord file.

  The elements of the dataset must be scalar strings. To serialize dataset
  elements as strings, you can use the `tf.io.serialize_tensor` function.

  ```python
  dataset = tf.data.Dataset.range(3)
  dataset = dataset.map(tf.io.serialize_tensor)
  writer = tf.data.experimental.TFRecordWriter("/path/to/file.tfrecord")
  writer.write(dataset)
  ```

  To read back the elements, use `TFRecordDataset`.

  ```python
  dataset = tf.data.TFRecordDataset("/path/to/file.tfrecord")
  dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.int64))
  ```

  To shard a `dataset` across multiple TFRecord files:

  ```python
  dataset = ...  # dataset to be written

  def reduce_func(key, dataset):
    filename = tf.strings.join([PATH_PREFIX, tf.strings.as_string(key)])
    writer = tf.data.experimental.TFRecordWriter(filename)
    writer.write(dataset.map(lambda _, x: x))
    return tf.data.Dataset.from_tensors(filename)

  dataset = dataset.enumerate()
  dataset = dataset.apply(tf.data.experimental.group_by_window(
    lambda i, _: i % NUM_SHARDS, reduce_func, tf.int64.max
  ))

  # Iterate through the dataset to trigger data writing.
  for _ in dataset:
    pass
  ```
  """

  def __init__(self, filename, compression_type=None):
    """Initializes a `TFRecordWriter`.

    Args:
      filename: a string path indicating where to write the TFRecord data.
      compression_type: (Optional.) a string indicating what type of compression
        to use when writing the file. See `tf.io.TFRecordCompressionType` for
        what types of compression are available. Defaults to `None`.
    """
    self._filename = ops.convert_to_tensor(
        filename, dtypes.string, name="filename")
    # A `None` compression_type is canonicalized to an empty string tensor,
    # which the underlying kernel treats as "no compression".
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)

  def write(self, dataset):
    """Writes a dataset to a TFRecord file.

    An operation that writes the content of the specified dataset to the file
    specified in the constructor.

    If the file exists, it will be overwritten.

    Args:
      dataset: a `tf.data.Dataset` whose elements are to be written to a file

    Returns:
      In graph mode, this returns an operation which when executed performs the
      write. In eager mode, the write is performed by the method itself and
      there is no return value.

    Raises:
      TypeError: if `dataset` is not a `tf.data.Dataset`.
      TypeError: if the elements produced by the dataset are not scalar strings.
    """
    if not isinstance(dataset, data_types.DatasetV2):
      raise TypeError(
          f"Invalid `dataset`. Expected a `tf.data.Dataset` object but got "
          f"{type(dataset)}."
      )
    # The writer op only accepts scalar `tf.string` elements; reject anything
    # else up front with a readable error instead of a kernel failure.
    if not dataset_ops.get_structure(dataset).is_compatible_with(
        tensor_spec.TensorSpec([], dtypes.string)):
      raise TypeError(
          f"Invalid `dataset`. Expected a `dataset` that produces scalar "
          f"`tf.string` elements, but got a dataset which produces elements "
          f"with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and "
          f"types {dataset_ops.get_legacy_output_types(dataset)}.")
    # pylint: disable=protected-access
    dataset = dataset._apply_debug_options()
    return gen_experimental_dataset_ops.dataset_to_tf_record(
        dataset._variant_tensor, self._filename, self._compression_type)
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__init__.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """API for using the tf.data service.
16
+
17
+ This module contains:
18
+
19
+ 1. tf.data server implementations for running the tf.data service.
20
+ 2. APIs for registering datasets with the tf.data service and reading from
21
+ the registered datasets.
22
+
23
+ The tf.data service provides the following benefits:
24
+
25
+ - Horizontal scaling of tf.data input pipeline processing to solve input
26
+ bottlenecks.
27
+ - Data coordination for distributed training. Coordinated reads
28
+ enable all replicas to train on similar-length examples across each global
29
+ training step, improving step times in synchronous training.
30
+ - Dynamic balancing of data across training replicas.
31
+
32
+ >>> dispatcher = tf.data.experimental.service.DispatchServer()
33
+ >>> dispatcher_address = dispatcher.target.split("://")[1]
34
+ >>> worker = tf.data.experimental.service.WorkerServer(
35
+ ... tf.data.experimental.service.WorkerConfig(
36
+ ... dispatcher_address=dispatcher_address))
37
+ >>> dataset = tf.data.Dataset.range(10)
38
+ >>> dataset = dataset.apply(tf.data.experimental.service.distribute(
39
+ ... processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
40
+ ... service=dispatcher.target))
41
+ >>> print(list(dataset.as_numpy_iterator()))
42
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
43
+
44
+ ## Setup
45
+
46
+ This section goes over how to set up the tf.data service.
47
+
48
+ ### Run tf.data servers
49
+
50
+ The tf.data service consists of one dispatch server and `n` worker servers.
51
+ tf.data servers should be brought up alongside your training jobs, then brought
52
+ down when the jobs are finished.
53
+ Use `tf.data.experimental.service.DispatchServer` to start a dispatch server,
54
+ and `tf.data.experimental.service.WorkerServer` to start worker servers. Servers
55
+ can be run in the same process for testing purposes, or scaled up on separate
56
+ machines.
57
+
58
+ See https://github.com/tensorflow/ecosystem/tree/master/data_service for an
59
+ example of using Google Kubernetes Engine (GKE) to manage the tf.data service.
60
+ Note that the server implementation in
61
+ [tf_std_data_server.py](https://github.com/tensorflow/ecosystem/blob/master/data_service/tf_std_data_server.py)
62
+ is not GKE-specific, and can be used to run the tf.data service in other
63
+ contexts.
64
+
65
+ ### Custom ops
66
+
67
+ If your dataset uses custom ops, these ops need to be made available to tf.data
68
+ servers by calling
69
+ [load_op_library](https://www.tensorflow.org/api_docs/python/tf/load_op_library)
70
+ from the dispatcher and worker processes at startup.
71
+
72
+ ## Usage
73
+
74
+ Users interact with tf.data service by programmatically registering their
75
+ datasets with tf.data service, then creating datasets that read from the
76
+ registered datasets. The
77
+ [register_dataset](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/register_dataset)
78
+ function registers a dataset, then the
79
+ [from_dataset_id](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/from_dataset_id)
80
+ function creates a new dataset which reads from the registered dataset.
81
+ The
82
+ [distribute](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/distribute)
83
+ function wraps `register_dataset` and `from_dataset_id` into a single convenient
84
+ transformation which registers its input dataset and then reads from it.
85
+ `distribute` enables tf.data service to be used with a one-line code change.
86
+ However, it assumes that the dataset is created and consumed by the same entity
87
+ and this assumption might not always be valid or desirable. In particular, in
88
+ certain scenarios, such as distributed training, it might be desirable to
89
+ decouple the creation and consumption of the dataset (via `register_dataset`
90
+ and `from_dataset_id` respectively) to avoid having to create the dataset on
91
+ each of the training workers.
92
+
93
+ ### Example
94
+
95
+ #### `distribute`
96
+
97
+ To use the `distribute` transformation, apply the transformation after the
98
+ prefix of your input pipeline that you would like to be executed using tf.data
99
+ service (typically at the end).
100
+
101
+ ```
102
+ dataset = ... # Define your dataset here.
103
+ # Move dataset processing from the local machine to the tf.data service
104
+ dataset = dataset.apply(
105
+ tf.data.experimental.service.distribute(
106
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
107
+ service=FLAGS.tf_data_service_address,
108
+ job_name="shared_job"))
109
+ # Any transformations added after `distribute` will be run on the local machine.
110
+ dataset = dataset.prefetch(1)
111
+ ```
112
+
113
+ The above code will create a tf.data service "job", which iterates through the
114
+ dataset to generate data. To share the data from a job across multiple clients
115
+ (e.g. when using TPUStrategy or MultiWorkerMirroredStrategy), set a common
116
+ `job_name` across all clients.
117
+
118
+ #### `register_dataset` and `from_dataset_id`
119
+
120
+ `register_dataset` registers a dataset with the tf.data service, returning a
121
+ dataset id for the registered dataset. `from_dataset_id` creates a dataset that
122
+ reads from the registered dataset. These APIs can be used to reduce dataset
123
+ building time for distributed training. Instead of building the dataset on all
124
+ training workers, we can build the dataset just once and then register the
125
+ dataset using `register_dataset`. Then all workers can call `from_dataset_id`
126
+ without needing to build the dataset themselves.
127
+
128
+ ```
129
+ dataset = ... # Define your dataset here.
130
+ dataset_id = tf.data.experimental.service.register_dataset(
131
+ service=FLAGS.tf_data_service_address,
132
+ dataset=dataset)
133
+ # Use `from_dataset_id` to create per-worker datasets.
134
+ per_worker_datasets = {}
135
+ for worker in workers:
136
+ per_worker_datasets[worker] = tf.data.experimental.service.from_dataset_id(
137
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
138
+ service=FLAGS.tf_data_service_address,
139
+ dataset_id=dataset_id,
140
+ job_name="shared_job")
141
+ ```
142
+
143
+ ### Processing Modes
144
+
145
+ `processing_mode` specifies how to shard a dataset among tf.data service
146
+ workers. tf.data service supports `OFF`, `DYNAMIC`, `FILE`, `DATA`,
147
+ `FILE_OR_DATA`, `HINT` sharding policies.
148
+
149
+ OFF: No sharding will be performed. The entire input dataset will be processed
150
+ independently by each of the tf.data service workers. For this reason, it is
151
+ important to shuffle data (e.g. filenames) non-deterministically, so that each
152
+ worker will process the elements of the dataset in a different order. This mode
153
+ can be used to distribute datasets that aren't splittable.
154
+
155
+ If a worker is added or restarted during ShardingPolicy.OFF processing, the
156
+ worker will instantiate a new copy of the dataset and begin producing data from
157
+ the beginning.
158
+
159
+ #### Dynamic Sharding
160
+
161
+ DYNAMIC: In this mode, tf.data service divides the dataset into two components:
162
+ a source component that generates "splits" such as filenames, and a processing
163
+ component that takes splits and outputs dataset elements. The source component
164
+ is executed in a centralized fashion by the tf.data service dispatcher, which
165
+ generates different splits of input data. The processing component is executed
166
+ in a parallel fashion by the tf.data service workers, each operating on a
167
+ different set of input data splits.
168
+
169
+ For example, consider the following dataset:
170
+
171
+ ```
172
+ dataset = tf.data.Dataset.from_tensor_slices(filenames)
173
+ dataset = dataset.interleave(TFRecordDataset)
174
+ dataset = dataset.map(preprocess_fn)
175
+ dataset = dataset.batch(batch_size)
176
+ dataset = dataset.apply(
177
+ tf.data.experimental.service.distribute(
178
+ processing_mode=tf.data.experimental.service.ShardingPolicy.DYNAMIC,
179
+ ...))
180
+ ```
181
+
182
+ The `from_tensor_slices` will be run on the dispatcher, while the `interleave`,
183
+ `map`, and `batch` will be run on tf.data service workers. The workers will pull
184
+ filenames from the dispatcher for processing. To process a dataset with
185
+ dynamic sharding, the dataset must have a splittable source, and all of
186
+ its transformations must be compatible with splitting. While most sources and
187
+ transformations support splitting, there are exceptions, such as custom datasets
188
+ which may not implement the splitting API. Please file a Github issue if you
189
+ would like to use distributed epoch processing for a currently unsupported
190
+ dataset source or transformation.
191
+
192
+ If no workers are restarted during training, dynamic sharding mode will visit
193
+ every example exactly once. If workers are restarted during training, the splits
194
+ they were processing will not be fully visited. The dispatcher maintains a
195
+ cursor through the dataset's splits. Assuming fault tolerance is enabled (See
196
+ "Fault Tolerance" below), the dispatcher will store cursor state in write-ahead
197
+ logs so that the cursor can be restored in case the dispatcher is restarted
198
+ mid-training. This provides an at-most-once visitation guarantee in the presence
199
+ of server restarts.
200
+
201
+ #### Static Sharding
202
+
203
+ The following are static sharding policies. The semantics are similar to
204
+ `tf.data.experimental.AutoShardPolicy`. These policies require:
205
+
206
+ * The tf.data service cluster is configured with a fixed list of workers
207
+ in DispatcherConfig.
208
+ * Each client only reads from the local tf.data service worker.
209
+
210
+ If a worker is restarted while performing static sharding, the worker will
211
+ begin processing its shard again from the beginning.
212
+
213
+ FILE: Shards by input files (i.e. each worker will get a fixed set of files to
214
+ process). When this option is selected, make sure that there are at least as
215
+ many files as workers. If there are fewer input files than workers, a runtime
216
+ error will be raised.
217
+
218
+ DATA: Shards by elements produced by the dataset. Each worker will process the
219
+ whole dataset and discard the portion that is not for itself. Note that for
220
+ this mode to correctly partition the dataset elements, the dataset needs to
221
+ produce elements in a deterministic order.
222
+
223
+ FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based
224
+ sharding on failure.
225
+
226
+ HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
227
+ placeholder to replace with `shard(num_workers, worker_index)`.
228
+
229
+ For backwards compatibility, `processing_mode` may also be set to the strings
230
+ `"parallel_epochs"` or `"distributed_epoch"`, which are respectively equivalent
231
+ to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
232
+
233
+ ### Coordinated Data Read
234
+
235
+ By default, when multiple consumers read from the same job, they receive data on
236
+ a first-come first-served basis. In some use cases, it is advantageous to
237
+ coordinate the consumers. At each step, consumers read data from the same
238
+ worker.
239
+
240
+ For example, the tf.data service can be used to coordinate example sizes across
241
+ a cluster during synchronous training, so that during each step all replicas
242
+ train on similar-sized elements. To achieve this, define a dataset which
243
+ generates rounds of `num_consumers` consecutive similar-sized batches, then
244
+ enable coordinated reads by setting `consumer_index` and `num_consumers`.
245
+
246
+ NOTE: To keep consumers in sync, coordinated reads require that the dataset have
247
+ infinite cardinality. You can get this by adding `.repeat()` at the end of the
248
+ dataset definition.
249
+
250
+ ### Jobs
251
+
252
+ A tf.data service "job" refers to the process of reading from a dataset managed
253
+ by the tf.data service, using one or more data consumers. Jobs are created when
254
+ iterating over datasets that read from tf.data service. The data produced by a
255
+ job is determined by (1) dataset associated with the job and (2) the job's
256
+ processing mode. For example, if a job is created for the dataset
257
+ `Dataset.range(5)`, and the processing mode is `ShardingPolicy.OFF`, each
258
+ tf.data worker will produce the elements `{0, 1, 2, 3, 4}` for the job,
259
+ resulting in the
260
+ job producing `5 * num_workers` elements. If the processing mode is
261
+ `ShardingPolicy.DYNAMIC`, the job will only produce `5` elements.
262
+
263
+ One or more consumers can consume data from a job. By default, jobs are
264
+ "anonymous", meaning that only the consumer which created the job can read from
265
+ it. To share the output of a job across multiple consumers, you can set a common
266
+ `job_name`.
267
+
268
+ ### Fault Tolerance
269
+
270
+ By default, the tf.data dispatch server stores its state in-memory, making it a
271
+ single point of failure during training. To avoid this, pass
272
+ `fault_tolerant_mode=True` when creating your `DispatchServer`. Dispatcher
273
+ fault tolerance requires `work_dir` to be configured and accessible from the
274
+ dispatcher both before and after restart (e.g. a GCS path). With fault tolerant
275
+ mode enabled, the dispatcher will journal its state to the work directory so
276
+ that no state is lost when the dispatcher is restarted.
277
+
278
+ WorkerServers may be freely restarted, added, or removed during training. At
279
+ startup, workers will register with the dispatcher and begin processing all
280
+ outstanding jobs from the beginning.
281
+
282
+ ### Usage with tf.distribute
283
+
284
+ tf.distribute is the TensorFlow API for distributed training. There are
285
+ several ways to use tf.data with tf.distribute:
286
+ `strategy.experimental_distribute_dataset`,
287
+ `strategy.distribute_datasets_from_function`, and (for PSStrategy)
288
+ `coordinator.create_per_worker_dataset`. The following sections give code
289
+ examples for each.
290
+
291
+ In general we recommend using
292
+ `tf.data.experimental.service.{register_dataset,from_dataset_id}` over
293
+ `tf.data.experimental.service.distribute` for two reasons:
294
+
295
+ - The dataset only needs to be constructed and optimized once, instead of once
296
+ per worker. This can significantly reduce startup time, because the current
297
+ `experimental_distribute_dataset` and `distribute_datasets_from_function`
298
+ implementations create and optimize worker datasets sequentially.
299
+ - If a dataset depends on lookup tables or variables that are only present on
300
+ one host, the dataset needs to be registered from that host. Typically this
301
+ only happens when resources are placed on the chief or worker 0. Registering
302
+ the dataset from the chief will avoid issues with depending on remote
303
+ resources.
304
+
305
+ #### strategy.experimental_distribute_dataset
306
+
307
+ Nothing special is required when using
308
+ `strategy.experimental_distribute_dataset`, just apply `register_dataset` and
309
+ `from_dataset_id` as above, making sure to specify a `job_name` so that all
310
+ workers consume from the same tf.data service job.
311
+
312
+ ```
313
+ dataset = ... # Define your dataset here.
314
+ dataset_id = tf.data.experimental.service.register_dataset(
315
+ service=FLAGS.tf_data_service_address,
316
+ dataset=dataset)
317
+ dataset = tf.data.experimental.service.from_dataset_id(
318
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
319
+ service=FLAGS.tf_data_service_address,
320
+ dataset_id=dataset_id,
321
+ job_name="shared_job")
322
+
323
+ dataset = strategy.experimental_distribute_dataset(dataset)
324
+ ```
325
+
326
+ #### strategy.distribute_datasets_from_function
327
+
328
+ First, make sure the dataset produced by the `dataset_fn` does not depend on the
329
+ `input_context` for the training worker on which it is run. Instead of each
330
+ worker building its own (sharded) dataset, one worker should register an
331
+ unsharded dataset, and the remaining workers should consume data from that
332
+ dataset.
333
+
334
+ ```
335
+ dataset = dataset_fn()
336
+ dataset_id = tf.data.experimental.service.register_dataset(
337
+ service=FLAGS.tf_data_service_address,
338
+ dataset=dataset)
339
+
340
+ def new_dataset_fn(input_context):
341
+ del input_context
342
+ return tf.data.experimental.service.from_dataset_id(
343
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
344
+ service=FLAGS.tf_data_service_address,
345
+ dataset_id=dataset_id,
346
+ job_name="shared_job")
347
+
348
+ dataset = strategy.distribute_datasets_from_function(new_dataset_fn)
349
+ ```
350
+
351
+ #### coordinator.create_per_worker_dataset
352
+
353
+ `create_per_worker_dataset` works the same as
354
+ `distribute_datasets_from_function`.
355
+
356
+ ```
357
+ dataset = dataset_fn()
358
+ dataset_id = tf.data.experimental.service.register_dataset(
359
+ service=FLAGS.tf_data_service_address,
360
+ dataset=dataset)
361
+
362
+ def new_dataset_fn(input_context):
363
+ del input_context
364
+ return tf.data.experimental.service.from_dataset_id(
365
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
366
+ service=FLAGS.tf_data_service_address,
367
+ dataset_id=dataset_id,
368
+ job_name="shared_job")
369
+
370
+ dataset = coordinator.create_per_worker_dataset(new_dataset_fn)
371
+ ```
372
+
373
+ ### Sharing tf.data service with concurrent trainers
374
+
375
+ If you run multiple trainers concurrently using the same training data, it could
376
+ save resources to cache the data in one tf.data service cluster and share the
377
+ cluster with the trainers. For example, if you use Vizier to tune
378
+ hyperparameters, the Vizier jobs can run concurrently and share one tf.data
379
+ service cluster.
380
+
381
+ To enable this feature, each trainer needs to generate a unique trainer ID, and
382
+ you pass the trainer ID to `tf.data.experimental.service.distribute`. Once a job
383
+ has consumed data, the data remains in the cache and is re-used by jobs with
384
+ different `trainer_id`s. Requests with the same `trainer_id` do not re-use data.
385
+ For example:
386
+
387
+ ```
388
+ dataset = expensive_computation()
389
+ dataset = dataset.apply(tf.data.experimental.service.distribute(
390
+ processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
391
+ service=FLAGS.tf_data_service_address,
392
+ job_name="job",
393
+ cross_trainer_cache=data_service_ops.CrossTrainerCache(
394
+ trainer_id=trainer_id())))
395
+ ```
396
+
397
+ tf.data service uses a sliding-window cache to store shared data. When one
398
+ trainer consumes data, the data remains in the cache. When other trainers need
399
+ data, they can get data from the cache instead of repeating the expensive
400
+ computation. The cache has a bounded size, so some workers may not read the full
401
+ dataset. To ensure all the trainers get sufficient training data, we require the
402
+ input dataset to be infinite. This can be achieved, for example, by repeating
403
+ the dataset and performing random augmentation on the training instances.
404
+
405
+ ## Limitations
406
+
407
+ - Python-based data processing: Datasets which use Python-based data processing
408
+ (e.g. `tf.py_function`, `tf.numpy_function`, or
409
+ `tf.data.Dataset.from_generator`) are currently not supported.
410
+ - Non-Serializable Resources: Datasets may only depend on TF resources that
411
+ support serialization. Serialization is currently supported for lookup
412
+ tables and variables. If your dataset depends on a TF resource that cannot be
413
+ serialized, please file a Github issue.
414
+ - Remote Resources: If a dataset depends on a resource, the dataset must be
415
+ registered from the same process that created the resource (e.g. the "chief"
416
+ job of ParameterServerStrategy).
417
+ """
418
+
419
+ from tensorflow.python.data.experimental.ops.data_service_ops import distribute
420
+ from tensorflow.python.data.experimental.ops.data_service_ops import from_dataset_id
421
+ from tensorflow.python.data.experimental.ops.data_service_ops import register_dataset
422
+ from tensorflow.python.data.experimental.ops.data_service_ops import ShardingPolicy
423
+ from tensorflow.python.data.experimental.service.server_lib import DispatcherConfig
424
+ from tensorflow.python.data.experimental.service.server_lib import DispatchServer
425
+ from tensorflow.python.data.experimental.service.server_lib import WorkerConfig
426
+ from tensorflow.python.data.experimental.service.server_lib import WorkerServer
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/__pycache__/server_lib.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
videochat2/lib/python3.10/site-packages/tensorflow/python/data/experimental/service/_pywrap_server_lib.pyi ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ from typing import Any
17
+
18
class DispatchGrpcDataServer:
  """Type stub for the native (pybind11) tf.data service dispatch server."""

  def __init__(self, *args, **kwargs) -> None: ...
  # Port the gRPC server is bound to.
  def bound_port(self) -> int: ...
  # NOTE(review): presumably blocks until the server shuts down — confirm
  # against the native implementation.
  def join(self) -> None: ...
  # NOTE(review): presumably the number of workers registered with this
  # dispatcher — confirm.
  def num_workers(self) -> int: ...
  def snapshot_streams(self, *args, **kwargs) -> Any: ...
  # NOTE(review): `Status` is not defined or imported anywhere in this stub;
  # the runtime object likely returns a wrapped TF/absl Status — verify.
  def start(self) -> Status: ...
  def stop(self) -> None: ...
26
+
27
class SnapshotStreamInfoWrapper:
  """Type stub for snapshot stream info exposed by the native extension."""

  def __init__(self) -> None: ...
  @property
  def index(self) -> int: ...
  # NOTE(review): presumably an integer code for the stream's lifecycle
  # state; the enum semantics are not visible from this stub — confirm.
  @property
  def state(self) -> int: ...
33
+
34
class SnapshotTaskProgressWrapper:
  """Type stub for snapshot task progress exposed by the native extension."""

  def __init__(self) -> None: ...
  @property
  def completed(self) -> bool: ...
  # Returned as raw bytes, not str, per the annotation below.
  @property
  def snapshot_task_base_path(self) -> bytes: ...
  @property
  def snapshot_task_stream_index(self) -> int: ...
42
+
43
class WorkerGrpcDataServer:
  """Type stub for the native (pybind11) tf.data service worker server."""

  def __init__(self, *args, **kwargs) -> None: ...
  # Port the gRPC server is bound to.
  def bound_port(self) -> int: ...
  # NOTE(review): presumably blocks until the server shuts down — confirm.
  def join(self) -> None: ...
  # NOTE(review): presumably the number of tasks this worker is currently
  # processing — confirm.
  def num_tasks(self) -> int: ...
  def snapshot_task_progresses(self, *args, **kwargs) -> Any: ...
  # NOTE(review): `Status` is not defined or imported anywhere in this stub;
  # the runtime object likely returns a wrapped TF/absl Status — verify.
  def start(self) -> Status: ...
  def stop(self) -> None: ...
51
+
52
# Module-level factory/lookup helpers implemented in the native extension.
def TF_DATA_GetDataServiceMetadataByID(*args, **kwargs) -> Any: ...
# NOTE(review): the `str` argument is presumably a serialized server config
# (e.g. a DispatcherConfig/WorkerConfig proto) — confirm against callers.
def TF_DATA_NewDispatchServer(arg0: str) -> DispatchGrpcDataServer: ...
def TF_DATA_NewWorkerServer(arg0: str) -> WorkerGrpcDataServer: ...