ZTWHHH committed on
Commit
d6cfa7e
·
verified ·
1 Parent(s): 482e085

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. llava_next/share/terminfo/a/aaa-30-rv +0 -0
  3. llava_next/share/terminfo/a/aaa-30-s-ctxt +0 -0
  4. llava_next/share/terminfo/a/aaa-s-ctxt +0 -0
  5. llava_next/share/terminfo/a/act5 +0 -0
  6. llava_next/share/terminfo/a/adm42-ns +0 -0
  7. llava_next/share/terminfo/a/alt4 +0 -0
  8. llava_next/share/terminfo/a/amiga-8bit +0 -0
  9. llava_next/share/terminfo/a/ampex210 +0 -0
  10. llava_next/share/terminfo/a/ansi+arrows +0 -0
  11. llava_next/share/terminfo/a/ansi+sgrdim +0 -0
  12. llava_next/share/terminfo/a/ansi-color-3-emx +0 -0
  13. llava_next/share/terminfo/a/ansi77 +0 -0
  14. llava_next/share/terminfo/a/ansi80x25 +0 -0
  15. llava_next/share/terminfo/a/appleII +0 -0
  16. llava_next/share/terminfo/a/att4415-rv +0 -0
  17. llava_next/share/terminfo/a/att4415-w +0 -0
  18. llava_next/share/terminfo/a/att505 +0 -0
  19. llava_next/share/terminfo/a/att5420-rv +0 -0
  20. llava_next/share/terminfo/a/att5430 +0 -0
  21. llava_next/share/terminfo/a/avt-w-s +0 -0
  22. parrot/lib/python3.10/site-packages/torch/_library/__pycache__/__init__.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/torch/ao/pruning/__init__.py +19 -0
  24. parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/__init__.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__init__.py +0 -0
  26. parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py +154 -0
  27. parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py +40 -0
  28. parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/quantization_utils.py +131 -0
  29. parrot/lib/python3.10/site-packages/torch/ao/pruning/scheduler/cubic_scheduler.py +108 -0
  30. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__init__.py +0 -0
  31. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/__init__.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/base_sparsifier.cpython-310.pyc +0 -0
  33. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/nearly_diagonal_sparsifier.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/utils.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/base_sparsifier.py +354 -0
  36. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/nearly_diagonal_sparsifier.py +56 -0
  37. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/utils.py +137 -0
  38. parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py +201 -0
  39. parrot/lib/python3.10/site-packages/torch/ao/quantization/_equalize.py +183 -0
  40. parrot/lib/python3.10/site-packages/torch/ao/quantization/fuse_modules.py +176 -0
  41. parrot/lib/python3.10/site-packages/torch/ao/quantization/qconfig.py +569 -0
  42. parrot/lib/python3.10/site-packages/torch/ao/quantization/stubs.py +65 -0
  43. videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc +3 -0
  44. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/array_ops_stack.py +214 -0
  45. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/clustering_ops.py +774 -0
  46. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/collective_ops.py +578 -0
  47. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_grad.py +247 -0
  48. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_util.py +367 -0
  49. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/data_flow_ops.py +2518 -0
  50. videochat2/lib/python3.10/site-packages/tensorflow/python/ops/embedding_ops.py +1184 -0
.gitattributes CHANGED
@@ -831,3 +831,4 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_regi
831
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_proto_comparators.so filter=lfs diff=lfs merge=lfs -text
832
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_pywrap_python_op_gen.so filter=lfs diff=lfs merge=lfs -text
833
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_library_pybind.so filter=lfs diff=lfs merge=lfs -text
 
 
831
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_proto_comparators.so filter=lfs diff=lfs merge=lfs -text
832
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_pywrap_python_op_gen.so filter=lfs diff=lfs merge=lfs -text
833
  videochat2/lib/python3.10/site-packages/tensorflow/python/framework/_op_def_library_pybind.so filter=lfs diff=lfs merge=lfs -text
834
+ videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/share/terminfo/a/aaa-30-rv ADDED
Binary file (1.33 kB). View file
 
llava_next/share/terminfo/a/aaa-30-s-ctxt ADDED
Binary file (1.37 kB). View file
 
llava_next/share/terminfo/a/aaa-s-ctxt ADDED
Binary file (1.37 kB). View file
 
llava_next/share/terminfo/a/act5 ADDED
Binary file (498 Bytes). View file
 
llava_next/share/terminfo/a/adm42-ns ADDED
Binary file (521 Bytes). View file
 
llava_next/share/terminfo/a/alt4 ADDED
Binary file (1.2 kB). View file
 
llava_next/share/terminfo/a/amiga-8bit ADDED
Binary file (719 Bytes). View file
 
llava_next/share/terminfo/a/ampex210 ADDED
Binary file (822 Bytes). View file
 
llava_next/share/terminfo/a/ansi+arrows ADDED
Binary file (260 Bytes). View file
 
llava_next/share/terminfo/a/ansi+sgrdim ADDED
Binary file (463 Bytes). View file
 
llava_next/share/terminfo/a/ansi-color-3-emx ADDED
Binary file (1.71 kB). View file
 
llava_next/share/terminfo/a/ansi77 ADDED
Binary file (543 Bytes). View file
 
llava_next/share/terminfo/a/ansi80x25 ADDED
Binary file (1.5 kB). View file
 
llava_next/share/terminfo/a/appleII ADDED
Binary file (428 Bytes). View file
 
llava_next/share/terminfo/a/att4415-rv ADDED
Binary file (1.4 kB). View file
 
llava_next/share/terminfo/a/att4415-w ADDED
Binary file (1.39 kB). View file
 
llava_next/share/terminfo/a/att505 ADDED
Binary file (1.18 kB). View file
 
llava_next/share/terminfo/a/att5420-rv ADDED
Binary file (1.4 kB). View file
 
llava_next/share/terminfo/a/att5430 ADDED
Binary file (1.18 kB). View file
 
llava_next/share/terminfo/a/avt-w-s ADDED
Binary file (1.23 kB). View file
 
parrot/lib/python3.10/site-packages/torch/_library/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (400 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Variables
2
+ from ._mappings import get_dynamic_sparse_quantized_mapping
3
+ from ._mappings import get_static_sparse_quantized_mapping
4
+
5
+ # Sparsifier
6
+ from .sparsifier.base_sparsifier import BaseSparsifier
7
+ from .sparsifier.weight_norm_sparsifier import WeightNormSparsifier
8
+ from .sparsifier.nearly_diagonal_sparsifier import NearlyDiagonalSparsifier
9
+
10
+ # Scheduler
11
+ from .scheduler.base_scheduler import BaseScheduler
12
+ from .scheduler.lambda_scheduler import LambdaSL
13
+ from .scheduler.cubic_scheduler import CubicSL
14
+
15
+ # Parametrizations
16
+ from .sparsifier.utils import FakeSparsity
17
+ from .sparsifier.utils import module_to_fqn
18
+ from .sparsifier.utils import fqn_to_module
19
+ from .sparsifier.utils import get_arg_info_from_tensor_fqn
parrot/lib/python3.10/site-packages/torch/ao/pruning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (879 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ from torch.nn import functional as F
4
+ from functools import reduce
5
+ from typing import Any, List, Optional, Tuple
6
+
7
+ from .base_data_sparsifier import BaseDataSparsifier
8
+ import operator
9
+
10
+ __all__ = ['DataNormSparsifier']
11
+
12
+
13
class DataNormSparsifier(BaseDataSparsifier):
    r"""L1-Norm Sparsifier

    This sparsifier computes the *L1-norm* of every sparse block and "zeroes-out" the
    ones with the lowest norm. The level of sparsity defines how many of the
    blocks are removed.

    This sparsifier is controlled by three variables:
    1. `sparsity_level` defines the number of *sparse blocks* that are zeroed-out
    2. `sparse_block_shape` defines the shape of the sparse blocks. Note that
       the sparse blocks originate at the zero-index of the tensor.
    3. `zeros_per_block` is the number of zeros that we are expecting in each
       sparse block. By default we assume that all elements within a block are
       zeroed-out. However, setting this variable sets the target number of
       zeros per block. The zeros within each block are chosen as the *smallest
       absolute values*.

    Args:
        sparsity_level: The target level of sparsity
        sparse_block_shape: The shape of a sparse block
        zeros_per_block: Number of zeros in a sparse block

    Note::
        All arguments to the DataNormSparsifier constructor are "default"
        arguments and could be overridden by the configuration provided in the
        `add_data` step.
    """
    def __init__(self, data_list: Optional[List[Tuple[str, Any]]] = None, sparsity_level: float = 0.5,
                 sparse_block_shape: Tuple[int, int] = (1, 4),
                 zeros_per_block: Optional[int] = None, norm: str = 'L1'):
        # Default: zero out every element of each selected block.
        if zeros_per_block is None:
            zeros_per_block = reduce(operator.mul, sparse_block_shape)

        assert norm in ['L1', 'L2'], "only L1 and L2 norm supported at the moment"

        defaults = {'sparsity_level': sparsity_level, 'sparse_block_shape': sparse_block_shape,
                    'zeros_per_block': zeros_per_block}
        self.norm = norm
        super().__init__(data_list=data_list, **defaults)

    def __get_scatter_folded_mask(self, data, dim, indices, output_size, sparse_block_shape):
        # Build an all-ones mask in the unfolded layout, zero the selected
        # indices, then fold back to the (padded) 2-D shape.
        mask = torch.ones_like(data)
        mask.scatter_(dim=dim, index=indices, value=0)  # zeroing out
        mask = F.fold(mask, output_size=output_size, kernel_size=sparse_block_shape,
                      stride=sparse_block_shape)
        mask = mask.to(torch.int8)
        return mask

    def __get_block_level_mask(self, data,
                               sparse_block_shape, zeros_per_block):
        # Assume data is a squeezed tensor
        height, width = data.shape[-2], data.shape[-1]
        block_height, block_width = sparse_block_shape
        values_per_block = block_height * block_width

        # just return zeros if zeroing all elements in block
        if values_per_block == zeros_per_block:
            return torch.zeros_like(data, dtype=torch.int8)

        # creating additional height and width to support padding
        dh = (block_height - height % block_height) % block_height
        dw = (block_width - width % block_width) % block_width

        # create a new padded tensor like data (to match the block_shape)
        padded_data = torch.ones(height + dh, width + dw, dtype=data.dtype, device=data.device)
        padded_data = padded_data * torch.nan  # can also be replaced with 0 to stop the removal of edge data
        padded_data[0:height, 0:width] = data
        unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape,
                                 stride=sparse_block_shape)

        _, sorted_idx = torch.sort(unfolded_data, dim=1)
        sorted_idx = sorted_idx[:, :zeros_per_block, :]  # zero out zeros_per_block number of elements

        mask = self.__get_scatter_folded_mask(data=unfolded_data, dim=1, indices=sorted_idx,
                                              output_size=padded_data.shape,
                                              sparse_block_shape=sparse_block_shape)

        mask = mask.squeeze(0).squeeze(0)[:height, :width].contiguous()  # remove padding and make contiguous
        return mask

    def __get_data_level_mask(self, data, sparsity_level,
                              sparse_block_shape):
        height, width = data.shape[-2], data.shape[-1]
        block_height, block_width = sparse_block_shape
        dh = (block_height - height % block_height) % block_height
        dw = (block_width - width % block_width) % block_width

        # Per-block norm via average pooling (scale factor is irrelevant
        # because only the relative ordering of blocks is used below).
        data_norm = F.avg_pool2d(data[None, None, :], kernel_size=sparse_block_shape,
                                 stride=sparse_block_shape, ceil_mode=True)

        values_per_block = reduce(operator.mul, sparse_block_shape)

        data_norm = data_norm.flatten()
        num_blocks = len(data_norm)

        data_norm = data_norm.repeat(1, values_per_block, 1)  # get similar shape after unfold
        _, sorted_idx = torch.sort(data_norm, dim=2)

        threshold_idx = round(sparsity_level * num_blocks)  # number of blocks to remove
        sorted_idx = sorted_idx[:, :, :threshold_idx]

        mask = self.__get_scatter_folded_mask(data=data_norm, dim=2, indices=sorted_idx,
                                              output_size=(height + dh, width + dw),
                                              sparse_block_shape=sparse_block_shape)

        mask = mask.squeeze(0).squeeze(0)[:height, :width]  # squeeze only the first 2 dimension
        return mask

    def update_mask(self, name, data, sparsity_level,
                    sparse_block_shape, zeros_per_block, **kwargs):
        values_per_block = reduce(operator.mul, sparse_block_shape)
        if zeros_per_block > values_per_block:
            raise ValueError("Number of zeros per block cannot be more than "
                             "the total number of elements in that block.")
        if zeros_per_block < 0:
            raise ValueError("Number of zeros per block should be positive.")

        if self.norm == 'L1':
            data_norm = torch.abs(data).squeeze()  # absolute value based (L1)
        else:
            data_norm = (data * data).squeeze()  # square every element for L2

        if len(data_norm.shape) > 2:  # only supports 2 dimensional data at the moment
            raise ValueError("only supports 2-D at the moment")

        elif len(data_norm.shape) == 1:  # in case the data is bias (or 1D)
            data_norm = data_norm[None, :]

        mask = self.get_mask(name)
        if sparsity_level <= 0 or zeros_per_block == 0:
            mask.data = torch.ones_like(mask)
        elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
            mask.data = torch.zeros_like(mask)
        else:
            # NOTE: the else-branch guard is essential; without it the fast-path
            # assignments above are immediately overwritten by the computed mask.

            # Fetch the high level mask that zeros out entire blocks
            data_lvl_mask = self.__get_data_level_mask(data=data_norm, sparsity_level=sparsity_level,
                                                       sparse_block_shape=sparse_block_shape)

            # Fetch block level mask that zeros out 'zeros_per_block' number of elements in every block
            block_lvl_mask = self.__get_block_level_mask(data=data_norm, sparse_block_shape=sparse_block_shape,
                                                         zeros_per_block=zeros_per_block)

            # zero out the entries inside those blocks whose block is sparsified
            mask.data = torch.where(data_lvl_mask == 1, data_lvl_mask, block_lvl_mask)
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ from torch.ao.pruning._experimental.data_sparsifier.base_data_sparsifier import SUPPORTED_TYPES
4
+
5
+ logger: logging.Logger = logging.getLogger(__name__)
6
+
7
+
8
def _attach_model_to_data_sparsifier(module, data_sparsifier, config=None):
    """Attaches a data sparsifier to all the layers of the module.

    Iterates over every named parameter of ``module`` and registers each
    supported one with ``data_sparsifier``.

    Note::
        The '.' in the layer names are replaced with '_' (refer to _get_valid_name() below)
        before attaching to the sparsifier. This is because the data
        sparsifier uses a dummy model inside to store the weight parameters.
    """
    if config is None:
        config = {}
    for param_name, param in module.named_parameters():
        if type(param) not in SUPPORTED_TYPES:
            continue
        sparsifier_name = _get_valid_name(param_name)
        # Falls back to the sparsifier's default config when no per-parameter
        # config is supplied.
        data_sparsifier.add_data(name=sparsifier_name, data=param,
                                 **config.get(sparsifier_name, {}))
24
+
25
+
26
+ def _get_valid_name(name):
27
+ return name.replace('.', '_') # . is not allowed as a name
28
+
29
+
30
def _log_sparsified_level(model, data_sparsifier) -> None:
    """Log the level of sparsity of every supported parameter AFTER a sparsifier step.

    Args:
        model: module whose named parameters are inspected.
        data_sparsifier: sparsifier holding the masks, queried via ``get_mask``.
    """
    # Show the level of sparsity AFTER step:
    for name, parameter in model.named_parameters():
        if type(parameter) not in SUPPORTED_TYPES:
            continue
        valid_name = _get_valid_name(name)
        mask = data_sparsifier.get_mask(name=valid_name)
        # Fraction of masked-out (zeroed) entries.
        sparsity_level = 1.0 - mask.float().mean()
        # Bug fix: the original format string "% .2%" is an invalid
        # printf-style spec (trailing bare '%'), so logging reported a
        # formatting error instead of the message. Use a valid float spec
        # with lazy %-args.
        logger.info(
            "Sparsity in layer %s = %.2f", name, sparsity_level
        )
parrot/lib/python3.10/site-packages/torch/ao/pruning/_experimental/data_sparsifier/quantization_utils.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.ao.pruning.sparsifier.utils import module_to_fqn, fqn_to_module
5
+ from typing import Dict, List, Optional
6
+
7
+ SUPPORTED_MODULES = {
8
+ nn.Embedding,
9
+ nn.EmbeddingBag
10
+ }
11
+
12
+
13
def _fetch_all_embeddings(model):
    """Fetches Embedding and EmbeddingBag modules from the model
    """
    embedding_modules = []
    to_visit = [model]
    # Depth-first traversal; supported modules are collected, everything
    # else is descended into.
    while to_visit:
        current = to_visit.pop()
        for _, child in current.named_children():
            child_fqn = module_to_fqn(model, child)
            if type(child) in SUPPORTED_MODULES:
                embedding_modules.append((child_fqn, child))
            else:
                to_visit.append(child)
    return embedding_modules
27
+
28
+
29
def post_training_sparse_quantize(model,
                                  data_sparsifier_class,
                                  sparsify_first=True,
                                  select_embeddings: Optional[List[nn.Module]] = None,
                                  **sparse_config):
    """Takes in a model and applies sparsification and quantization to only embeddings & embeddingbags.
    The quantization step can happen before or after sparsification depending on the `sparsify_first` argument.

    Args:
        - model (nn.Module)
            model whose embeddings need to be sparsified
        - data_sparsifier_class (type of data sparsifier)
            Type of sparsification that needs to be applied to model
        - sparsify_first (bool)
            if true, sparsifies first and then quantizes
            otherwise, quantizes first and then sparsifies.
        - select_embeddings (List of Embedding modules)
            List of embedding modules in the model to be sparsified & quantized.
            If None, all embedding modules will be sparsified
        - sparse_config (Dict)
            config that will be passed to the constructor of the data sparsifier object.

    Note:
        1. When `sparsify_first=False`, quantization occurs first followed by sparsification.
            - before sparsifying, the embedding layers are dequantized.
            - scales and zero-points are saved
            - embedding layers are sparsified and `squash_mask` is applied
            - embedding weights are requantized using the saved scales and zero-points
        2. When `sparsify_first=True`, sparsification occurs first followed by quantization.
            - embeddings are sparsified first
            - quantization is applied on the sparsified embeddings
    """
    data_sparsifier = data_sparsifier_class(**sparse_config)

    # Collect the embedding modules to operate on.
    if select_embeddings is None:
        # if select_embeddings is None, perform it on all embeddings
        embedding_modules = _fetch_all_embeddings(model)
    else:
        embedding_modules = []
        assert isinstance(select_embeddings, List), "the embedding_modules must be a list of embedding modules"
        for emb in select_embeddings:
            assert type(emb) in SUPPORTED_MODULES, "the embedding_modules list must be an embedding or embedding bags"
            fqn_name = module_to_fqn(model, emb)
            assert fqn_name is not None, "the embedding modules must be part of input model"
            embedding_modules.append((fqn_name, emb))

    if sparsify_first:
        # sparsify
        for name, emb_module in embedding_modules:
            valid_name = name.replace('.', '_')
            data_sparsifier.add_data(name=valid_name, data=emb_module)

        data_sparsifier.step()
        data_sparsifier.squash_mask()

        # quantize
        for _, emb_module in embedding_modules:
            emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig

        torch.ao.quantization.prepare(model, inplace=True)
        torch.ao.quantization.convert(model, inplace=True)
    else:
        # quantize
        for _, emb_module in embedding_modules:
            emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig

        torch.ao.quantization.prepare(model, inplace=True)
        torch.ao.quantization.convert(model, inplace=True)

        # retrieve scale & zero_points
        quantize_params: Dict[str, Dict] = {'scales': {}, 'zero_points': {},
                                            'dequant_weights': {}, 'axis': {},
                                            'dtype': {}}

        for name, _ in embedding_modules:
            quantized_emb = fqn_to_module(model, name)
            assert quantized_emb is not None  # satisfy mypy

            quantized_weight = quantized_emb.weight()  # type: ignore[operator]
            quantize_params['scales'][name] = quantized_weight.q_per_channel_scales()
            quantize_params['zero_points'][name] = quantized_weight.q_per_channel_zero_points()
            quantize_params['dequant_weights'][name] = torch.dequantize(quantized_weight)
            quantize_params['axis'][name] = quantized_weight.q_per_channel_axis()
            quantize_params['dtype'][name] = quantized_weight.dtype

            # attach data to sparsifier
            data_sparsifier.add_data(name=name.replace('.', '_'),
                                     data=quantize_params['dequant_weights'][name])

        data_sparsifier.step()
        data_sparsifier.squash_mask()

        # Requantize the now-sparsified weights with the saved qparams.
        for name, _ in embedding_modules:
            quantized_emb = fqn_to_module(model, name)
            assert quantized_emb is not None  # satisfy mypy
            requantized_vector = torch.quantize_per_channel(quantize_params['dequant_weights'][name],
                                                            scales=quantize_params['scales'][name],
                                                            zero_points=quantize_params['zero_points'][name],
                                                            dtype=quantize_params['dtype'][name],
                                                            axis=quantize_params['axis'][name])

            quantized_emb.set_weight(requantized_vector)  # type: ignore[operator]
parrot/lib/python3.10/site-packages/torch/ao/pruning/scheduler/cubic_scheduler.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import warnings
3
+
4
+ from .base_scheduler import BaseScheduler
5
+
6
+ __all__ = ["CubicSL"]
7
+
8
+ def _clamp(x, lo, hi):
9
+ return max(lo, min(hi, x))
10
+
11
+
12
class CubicSL(BaseScheduler):
    r"""Sets the sparsity level of each parameter group to the final sl
    plus a given exponential function.

    .. math::

        s_i = s_f + (s_0 - s_f) \cdot \left( 1 - \frac{t - t_0}{n\Delta t} \right)^3

    where :math:`s_i` is the sparsity at epoch :math:`t`, :math:`s_f` is the final
    sparsity level, :math:`f(i)` is the function to be applied to the current epoch
    :math:`t`, initial epoch :math:`t_0`, and final epoch :math:`t_f`.
    :math:`\Delta t` is used to control how often the update of the sparsity level
    happens.

    Args:
        sparsifier (BaseSparsifier): Wrapped sparsifier.
        init_sl (int, list): Initial level of sparsity
        init_t (int, list): Initial step, when pruning starts
        delta_t (int, list): Pruning frequency
        total_t (int, list): Total number of pruning steps
        initially_zero (bool, list): If True, sets the level of sparsity to 0
            before init_t (:math:`t_0`). Otherwise, the sparsity level before
            init_t (:math:`t_0`) is set to init_sl (:math:`s_0`)
        last_epoch (int): The index of last epoch. Default: -1.
        verbose (bool): If ``True``, prints a message to stdout for
            each update. Default: ``False``.
    """
    def __init__(self,
                 sparsifier,
                 init_sl=0.0,
                 init_t=0,
                 delta_t=10,
                 total_t=100,
                 initially_zero=False,
                 last_epoch=-1,
                 verbose=False
                 ):
        self.sparsifier = sparsifier

        # Normalize every per-group setting to a list (one entry per group).
        self.init_sl = self._make_sure_a_list(init_sl)
        self.init_t = self._make_sure_a_list(init_t)
        self.delta_t = self._make_sure_a_list(delta_t)
        self.total_t = self._make_sure_a_list(total_t)
        self.initially_zero = self._make_sure_a_list(initially_zero)

        super().__init__(sparsifier, last_epoch, verbose)

    @staticmethod
    def sparsity_compute_fn(s_0, s_f, t, t_0, dt, n, initially_zero=False):
        r"""Computes the current level of sparsity.

        Based on https://arxiv.org/pdf/1710.01878.pdf

        Args:
            s_0: Initial level of sparsity, :math:`s_i`
            s_f: Target level of sparsity, :math:`s_f`
            t: Current step, :math:`t`
            t_0: Initial step, :math:`t_0`
            dt: Pruning frequency, :math:`\Delta T`
            n: Pruning steps, :math:`n`
            initially_zero: Sets the level of sparsity to 0 before t_0.
                If False, sets to s_0

        Returns:
            The sparsity level :math:`s_t` at the current step :math:`t`
        """
        if initially_zero and t < t_0:
            return 0
        s_t = s_f + (s_0 - s_f) * (1.0 - (t - t_0) / (dt * n)) ** 3
        # Keep the schedule within [s_0, s_f] outside the active window.
        return _clamp(s_t, s_0, s_f)

    def get_sl(self):
        """Return the scheduled sparsity level for every parameter group."""
        if not self._get_sl_called_within_step:
            warnings.warn(
                "To get the last sparsity level computed by the scheduler, "
                "please use `get_last_sl()`.")
        schedule = zip(self.init_sl, self.base_sl, self.init_t,
                       self.delta_t, self.total_t, self.initially_zero)
        return [
            self.sparsity_compute_fn(s_0=s_0,
                                     s_f=s_f,
                                     t=self.last_epoch,
                                     t_0=t_0,
                                     dt=dt,
                                     n=n,
                                     initially_zero=zero_start)
            for s_0, s_f, t_0, dt, n, zero_start in schedule
        ]
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/base_sparsifier.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/nearly_diagonal_sparsifier.cpython-310.pyc ADDED
Binary file (2.44 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.51 kB). View file
 
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/base_sparsifier.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import abc
3
+ import copy
4
+ from collections import defaultdict
5
+ from typing import Any, Dict, Optional, Set, Tuple, List, Type
6
+
7
+ import torch
8
+ from torch import nn
9
+ from torch.nn.utils import parametrize
10
+ from torch.nn.utils.parametrize import type_before_parametrizations
11
+
12
+ from .utils import (
13
+ module_contains_param,
14
+ swap_module,
15
+ FakeSparsity,
16
+ get_arg_info_from_tensor_fqn,
17
+ module_to_fqn,
18
+ )
19
+
20
__all__ = ["BaseSparsifier"]

# Module types sparsified by default when no explicit config is given.
SUPPORTED_MODULES = {nn.Linear}

# Group keys that reference live objects and must not be serialized.
KEYS_NOT_IN_STATE_DICT = ["module", "module_fqn", "tensor_name"]
27
+
28
+
29
+ # TODO update desc with new config args
30
+ class BaseSparsifier(abc.ABC):
31
+ r"""Base class for all sparsifiers.
32
+
33
+ Abstract methods that need to be implemented:
34
+
35
+ - update_mask: Function to compute a new mask for all keys in the
36
+ `groups`.
37
+
38
+ Args:
39
+ - model [nn.Module]: model to configure. The model itself is not saved
40
+ but used for the state_dict saving / loading.
41
+ - config [list]: configuration elements should be a dict map that includes
42
+ `tensor_fqn` of tensors to sparsify
43
+ - defaults [dict]: default configurations will be attached to the
44
+ configuration. Only the keys that don't exist in the `config` will
45
+ be updated.
46
+
47
+ Example::
48
+
49
+ >>> # xdoctest: +SKIP("Can't instantiate abstract class BaseSparsifier with abstract method update_mask")
50
+ >>> config = [{'tensor_fqn': 'layer1.weight', 'tensor_fqn': 'linear2.weight2', 'sparsity_level': 0.5}]
51
+ >>> defaults = {'sparsity_level': 0.7}
52
+ >>> # model.layer1.weight will have `sparsity_level` = 0.7 (getting default)
53
+ >>> sparsifier = BaseSparsifier(config, defaults)
54
+ """
55
+
56
+ def __init__(self, defaults: Optional[Dict[str, Any]] = None):
57
+ super().__init__()
58
+ self.defaults: Dict[str, Any] = defaults or {}
59
+
60
+ self.state: Dict[str, Dict] = defaultdict(dict)
61
+ self.groups: List[Dict[str, Any]] = []
62
+ self.enable_mask_update = True
63
+
64
+ def __getstate__(self) -> Dict[str, Any]:
65
+ return {
66
+ "defaults": self.defaults,
67
+ "state": self.state,
68
+ "groups": self.groups,
69
+ }
70
+
71
+ def __setstate__(self, state: Dict[str, Dict[str, Any]]) -> None:
72
+ self.__dict__.update(state)
73
+
74
+ def __repr__(self):
75
+ format_string = self.__class__.__name__ + " ("
76
+ for i, sparse_args in enumerate(self.groups):
77
+ module = sparse_args["module"]
78
+ format_string += "\n"
79
+ format_string += f"\tGroup {i}\n"
80
+ format_string += f"\t module: {module}\n"
81
+ for key in sorted(sparse_args.keys()):
82
+ if key == "module":
83
+ continue
84
+ format_string += f"\t {key}: {sparse_args[key]}\n"
85
+ format_string += ")"
86
+ return format_string
87
+
88
+ def state_dict(self) -> Dict[str, Any]:
89
+ r"""Returns the state of the optimizer as a :class:`dict`.
90
+
91
+ It contains:
92
+ * state - current state of the sparsification.
93
+ * groups - a list containing all sparsity configuration groups
94
+ with the key 'tensor_fqn' specifying the path to the sparsified tensor within a model
95
+
96
+ TODO: Need a clean way of loading the state of the "prepared" module
97
+ """
98
+
99
+ groups: List[Dict[str, Any]] = [
100
+ dict(
101
+ filter(
102
+ lambda key_value: key_value[0] not in KEYS_NOT_IN_STATE_DICT,
103
+ mg.items(),
104
+ )
105
+ )
106
+ for mg in self.groups
107
+ ]
108
+
109
+ return {
110
+ "state": self.state,
111
+ "groups": groups,
112
+ }
113
+
114
+ def load_state_dict(self, state_dict: Dict[str, Any], strict: bool = True):
115
+ groups = copy.deepcopy(state_dict["groups"])
116
+ states = state_dict["state"]
117
+ for tensor_fqn, s in states.items():
118
+ arg_info = get_arg_info_from_tensor_fqn(self.model, tensor_fqn)
119
+ module = arg_info["module"]
120
+ tensor_name = arg_info["tensor_name"]
121
+ if strict and module is None:
122
+ raise RuntimeError(f"Error loading {tensor_fqn} into the model")
123
+
124
+ found = False
125
+ for p in module.parametrizations[tensor_name]:
126
+ if isinstance(p, FakeSparsity):
127
+ found = True
128
+ break
129
+ if not found:
130
+ p = FakeSparsity(torch.ones(getattr(module, tensor_name).shape))
131
+ parametrize.register_parametrization(module, tensor_name, p)
132
+ if s.get("mask", None) is not None:
133
+ mask = s.pop("mask")
134
+ p.mask = mask
135
+
136
+ for mg in groups:
137
+ if mg["tensor_fqn"] == tensor_fqn:
138
+ mg.update(arg_info)
139
+ self.__setstate__({"state": states, "groups": groups})
140
+
141
+ def make_config_from_model(
142
+ self,
143
+ model: nn.Module,
144
+ SUPPORTED_MODULES: Set[Type] = SUPPORTED_MODULES,
145
+ ) -> None:
146
+ self.config = []
147
+ stack = [model]
148
+ while stack:
149
+ module = stack.pop()
150
+ for name, child in module.named_children():
151
+ if type(child) in SUPPORTED_MODULES:
152
+ module_fqn = module_to_fqn(model, child)
153
+ assert isinstance(module_fqn, str) # for mypy
154
+ self.config.append({"tensor_fqn": module_fqn + ".weight"})
155
+ else:
156
+ stack.append(child)
157
+
158
+ def prepare(self, model, config):
159
+ r"""Prepares a model, by adding the parametrizations.
160
+
161
+ Note::
162
+
163
+ The model is modified inplace. If you need to preserve the original
164
+ model, use copy.deepcopy.
165
+ """
166
+ self.model = model # TODO: Need to figure out how to load without this.
167
+ self.config = config
168
+
169
+ # If no config -- try getting all the supported layers
170
+ if self.config is None:
171
+ self.make_config_from_model(model)
172
+
173
+ # TODO: Remove the configuration by reference ('module')
174
+ for module_config in self.config:
175
+ assert isinstance(module_config, dict), (
176
+ "config elements should be dicts not modules i.e.:"
177
+ "[{`tensor_fqn`: `foo.bar.weight`}, {`tensor_fqn`: ... }, ...]"
178
+ )
179
+
180
+ assert isinstance(self.defaults, Dict) # for mypy
181
+ local_args = copy.deepcopy(self.defaults)
182
+ local_args.update(module_config)
183
+
184
+ tensor_fqn = local_args.get("tensor_fqn", None)
185
+ assert tensor_fqn is not None, (
186
+ "tensor_fqn is a required argument in the sparsity config which"
187
+ "replaces previous `module` and [module]`fqn` arguments"
188
+ )
189
+
190
+ # populate all information from tensor_fqn
191
+ info_from_tensor_fqn = get_arg_info_from_tensor_fqn(model, tensor_fqn)
192
+
193
+ # check that whatever was put into local_args agrees with what was obtained
194
+ # from tensor_fqn
195
+ for key in info_from_tensor_fqn.keys():
196
+ if key in local_args:
197
+ assert (
198
+ info_from_tensor_fqn[key] == local_args[key]
199
+ or (
200
+ key == "tensor_fqn"
201
+ and "." + info_from_tensor_fqn[key] == local_args[key]
202
+ )
203
+ # info_from_tensor_fqn will chop leading '.' from tensor_fqn so ignore that
204
+ ), (
205
+ f"Given both `{key}` and `tensor_fqn` in the config, it is expected them to agree!"
206
+ )
207
+ local_args.update(info_from_tensor_fqn)
208
+ self.groups.append(local_args)
209
+ self._prepare()
210
+
211
+ def _prepare(self, *args, **kwargs):
212
+ r"""Adds mask parametrization to the layer weight"""
213
+ for config in self.groups:
214
+ module = config["module"]
215
+ tensor_name = config["tensor_name"]
216
+ parametrization = config.get("parametrization", FakeSparsity)
217
+ mask = config.get("mask", torch.ones_like(getattr(module, tensor_name)))
218
+ self.state[config["tensor_fqn"]]["mask"] = mask
219
+ parametrize.register_parametrization(
220
+ module, tensor_name, parametrization(mask)
221
+ )
222
+
223
+ def squash_mask(
224
+ self,
225
+ params_to_keep: Optional[Tuple[str, ...]] = None,
226
+ params_to_keep_per_layer: Optional[Dict[str, Tuple[str, ...]]] = None,
227
+ *args,
228
+ **kwargs,
229
+ ):
230
+ r"""Squashes the sparse masks into the appropriate tensors.
231
+
232
+ If either the `params_to_keep` or `params_to_keep_per_layer` is set,
233
+ the module will have a `sparse_params` dict attached to it.
234
+
235
+ Args:
236
+ params_to_keep: List of keys to save in the module or a dict
237
+ representing the modules and keys that will have
238
+ sparsity parameters saved
239
+ params_to_keep_per_layer: Dict to specify the params that should be
240
+ saved for specific layers. The keys in the dict
241
+ should be the module fqn, while the values should
242
+ be a list of strings with the names of the variables
243
+ to save in the `sparse_params`
244
+
245
+ Examples:
246
+ >>> # xdoctest: +SKIP("locals are undefined")
247
+ >>> # Don't save any sparse params
248
+ >>> sparsifier.squash_mask()
249
+ >>> hasattr(model.submodule1, 'sparse_params')
250
+ False
251
+
252
+ >>> # Keep sparse params per layer
253
+ >>> sparsifier.squash_mask(
254
+ ... params_to_keep_per_layer={
255
+ ... 'submodule1.linear1': ('foo', 'bar'),
256
+ ... 'submodule2.linear42': ('baz',)
257
+ ... })
258
+ >>> print(model.submodule1.linear1.sparse_params)
259
+ {'foo': 42, 'bar': 24}
260
+ >>> print(model.submodule2.linear42.sparse_params)
261
+ {'baz': 0.1}
262
+
263
+ >>> # Keep sparse params for all layers
264
+ >>> sparsifier.squash_mask(params_to_keep=('foo', 'bar'))
265
+ >>> print(model.submodule1.linear1.sparse_params)
266
+ {'foo': 42, 'bar': 24}
267
+ >>> print(model.submodule2.linear42.sparse_params)
268
+ {'foo': 42, 'bar': 24}
269
+
270
+ >>> # Keep some sparse params for all layers, and specific ones for
271
+ >>> # some other layers
272
+ >>> sparsifier.squash_mask(
273
+ ... params_to_keep=('foo', 'bar'),
274
+ ... params_to_keep_per_layer={
275
+ ... 'submodule2.linear42': ('baz',)
276
+ ... })
277
+ >>> print(model.submodule1.linear1.sparse_params)
278
+ {'foo': 42, 'bar': 24}
279
+ >>> print(model.submodule2.linear42.sparse_params)
280
+ {'foo': 42, 'bar': 24, 'baz': 0.1}
281
+ """
282
+ for config in self.groups:
283
+ module = config["module"]
284
+ tensor_name = config["tensor_name"]
285
+ parametrize.remove_parametrizations(
286
+ module, tensor_name, leave_parametrized=True
287
+ )
288
+ sparse_params = {}
289
+ if params_to_keep is not None:
290
+ global_params = {k: config[k] for k in params_to_keep}
291
+ sparse_params.update(global_params)
292
+ if params_to_keep_per_layer is not None:
293
+ params = params_to_keep_per_layer.get(config["module_fqn"], None)
294
+ if params is not None:
295
+ per_layer_params = {k: config[k] for k in params}
296
+ sparse_params.update(per_layer_params)
297
+ if sparse_params:
298
+ # TODO handle multiple tensor being quantized on a single module, where to store sparse_params?
299
+ module.sparse_params = sparse_params
300
+
301
+ def convert(
302
+ self,
303
+ module: nn.Module,
304
+ mapping: Optional[Dict[Type[nn.Module], Type[nn.Module]]] = None,
305
+ inplace: bool = False,
306
+ parameterization: Type[nn.Module] = FakeSparsity,
307
+ ):
308
+ r"""Converts submodules in input module to a different module according to `mapping`
309
+ by calling `from_dense` method on the target module class
310
+ Args:
311
+ module: input module
312
+ mapping: a dictionary that maps from source module type to target
313
+ module type, can be overwritten to allow swapping user defined
314
+ Modules
315
+ inplace: carry out model transformations in-place, the original module
316
+ is mutated
317
+ """
318
+ if mapping is None:
319
+ raise NotImplementedError("Need to auto generate mapping ")
320
+ if not inplace:
321
+ module = copy.deepcopy(module)
322
+
323
+ reassign = {}
324
+ for name, mod in module.named_children():
325
+ # leaf node
326
+ if (
327
+ module_contains_param(mod, parameterization)
328
+ and type_before_parametrizations(mod) in mapping
329
+ ):
330
+ reassign[name] = swap_module(mod, mapping)
331
+ else:
332
+ # recurse
333
+ reassign[name] = self.convert(
334
+ mod,
335
+ mapping=mapping,
336
+ inplace=True,
337
+ parameterization=parameterization,
338
+ )
339
+
340
+ for key, value in reassign.items():
341
+ module._modules[key] = value
342
+
343
+ return module
344
+
345
+ def step(self, use_path: bool = True) -> None:
346
+ if not self.enable_mask_update:
347
+ return
348
+ with torch.no_grad():
349
+ for config in self.groups:
350
+ self.update_mask(**config)
351
+
352
+ @abc.abstractmethod
353
+ def update_mask(self, module: nn.Module, tensor_name: str, **kwargs):
354
+ pass
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/nearly_diagonal_sparsifier.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+ from . import base_sparsifier
5
+
6
+
7
class NearlyDiagonalSparsifier(base_sparsifier.BaseSparsifier):
    r"""Nearly Diagonal Sparsifier

    Builds a mask that keeps only the entries within a fixed band around the
    main diagonal of the weight matrix and zeroes everything else. Example
    masks with nearliness 3 and 5:
        1 1 0 0    1 1 1 0
        1 1 1 0    1 1 1 1
        0 1 1 1    1 1 1 1
        0 0 1 1    0 1 1 1
    Nearliness 1 keeps only the main diagonal.

    Controlled by a single hyperparameter:
    1. `nearliness` - the number of non-zero diagonals closest to the main
       diagonal; currently only odd values are supported.

    Note:
        This can be accelerated (vectorized) once the Spdiagonal feature (PR: #78439) is landed or the banded matrix
        feature is landed: https://stackoverflow.com/questions/52463972/generating-banded-matrices-using-numpy

    Args:
        nearliness: The degree of nearliness (default = 1)
    """

    def __init__(self, nearliness: int = 1):
        super().__init__(defaults={'nearliness': nearliness})

    def update_mask(self, module, tensor_name, nearliness, **kwargs):
        # Start from an all-zero mask and re-enable the band row by row.
        mask = getattr(module.parametrizations, tensor_name)[0].mask
        mask.data = torch.zeros_like(mask)
        if nearliness <= 0:
            return

        height, width = getattr(module, tensor_name).shape

        if nearliness % 2 == 0:
            raise ValueError("nearliness can only be an odd number")
        dist_to_diagonal = nearliness // 2
        # The band must fit inside the matrix.
        if dist_to_diagonal >= min(height, width):
            raise ValueError("nearliness cannot be larger than the dimensions of tensor.")

        for row in range(height):
            # Column range (inclusive/exclusive) to keep for this row.
            first = max(0, row - dist_to_diagonal)
            last = min(width, row + dist_to_diagonal + 1)
            mask[row, first:last].fill_(1)
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/utils.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any, Dict, Optional, Type
3
+ from torch.nn.utils.parametrize import type_before_parametrizations, is_parametrized
4
+ from itertools import chain
5
+
6
+ from torch import nn
7
+
8
+ __all__ = [
9
+ "module_contains_param",
10
+ "swap_module",
11
+ "module_to_fqn",
12
+ "fqn_to_module",
13
+ "get_arg_info_from_tensor_fqn",
14
+ "FakeSparsity",
15
+ ]
16
+
17
+
18
def module_contains_param(module: nn.Module, parametrization: Type[nn.Module]) -> bool:
    """Return True iff any parametrization attached to ``module`` is an
    instance of ``parametrization``."""
    if not is_parametrized(module):
        return False
    # Flatten every tensor's parametrization list and look for a match.
    return any(
        isinstance(param, parametrization)
        for param_list in module.parametrizations.values()  # type: ignore[union-attr,operator]
        for param in param_list
    )
26
+
27
+
28
def swap_module(
    mod: nn.Module, mapping: Dict[Type[nn.Module], Type[nn.Module]]
) -> nn.Module:
    r"""Swaps the module using from_dense according to the mapping passed in.
    Args:
        mod: input module
        mapping: a dictionary that maps from nn module to sparse nn module
    Return:
        The corresponding sparse module of `mod` according to mapping, created
        using from_dense; ``mod`` itself when its type is not in the mapping.
    """
    source_type = type_before_parametrizations(mod)
    if source_type not in mapping:
        return mod

    # TODO Fix this typing, as Type[Module] has no attribute "from_dense"
    new_mod = mapping[source_type].from_dense(mod)  # type: ignore[attr-defined]

    # Preserve module's pre forward hooks. They'll be called on quantized input
    for pre_hook_fn in mod._forward_pre_hooks.values():
        new_mod.register_forward_pre_hook(pre_hook_fn)
    # Preserve module's post forward hooks except _observer_forward_hook
    # After convert they'll work with quantized output
    for hook_fn in mod._forward_hooks.values():
        new_mod.register_forward_hook(hook_fn)

    # respect device affinity when swapping modules
    devices = {p.device for p in chain(mod.parameters(), mod.buffers())}
    assert len(devices) <= 1, (
        f"swap_module only works with cpu or single-device CUDA modules, but got devices {devices}"
    )
    if devices:
        new_mod.to(next(iter(devices)))

    return new_mod
65
+
66
+
67
def module_to_fqn(
    model: nn.Module, module: nn.Module, prefix: str = ""
) -> Optional[str]:
    """
    Returns the fqn for a module or None if module not a descendent of model.
    """
    if module is model:
        return ""
    # Depth-first search over children; a non-None suffix means `module`
    # was found somewhere below `child`.
    for child_name, child in model.named_children():
        suffix = module_to_fqn(child, module, ".")
        if suffix is not None:
            return prefix + child_name + suffix
    return None
80
+
81
+
82
def fqn_to_module(model: Optional[nn.Module], path: str) -> Optional[nn.Module]:
    """
    Given an fqn, returns the corresponding module or tensor or None if the fqn given by `path`
    doesn't correspond to anything. Similar to model.get_submodule(path) but works for tensors.
    """
    node = model
    # An empty path resolves to the model itself.
    if path:
        for part in path.split("."):
            node = getattr(node, part, None)
    return node
91
+
92
+
93
def get_arg_info_from_tensor_fqn(model: nn.Module, tensor_fqn: str) -> Dict[str, Any]:
    """
    Uses tensor_fqn to obtain a dict containing module_fqn, module and tensor_name
    """
    # The tensor name is the final dotted component; the module fqn is
    # everything before it, minus the separating '.' when one exists.
    # 'weight' -> ('', 'weight'); 'linear.weight' -> ('linear', 'weight')
    tensor_name = tensor_fqn.rsplit(".", 1)[-1]
    module_fqn = tensor_fqn[: -len(tensor_name) - ("." in tensor_fqn)]
    return {
        "module_fqn": module_fqn,
        "module": fqn_to_module(model, module_fqn),
        "tensor_name": tensor_name,
        "tensor_fqn": tensor_fqn,
    }
111
+
112
+
113
+ # Parametrizations
114
class FakeSparsity(nn.Module):
    r"""Parametrization that multiplies its input elementwise by a stored
    ``mask`` buffer. Attach to 'weight' (or any other parameter) that should
    have a mask applied.

    Note::

        Once the mask is passed, the variable should not change the id. The
        contents of the mask can change, but the mask reference itself should
        not.
    """

    def __init__(self, mask):
        super().__init__()
        # A buffer, not a Parameter: the mask is data, not a trainable weight.
        self.register_buffer("mask", mask)

    def forward(self, x):
        assert self.mask.shape == x.shape
        return self.mask * x

    def state_dict(self, *args, **kwargs):
        # Intentionally empty: the masks must not be serialized alongside the
        # parametrized module -- the sparsifier owns and saves them itself.
        return {}
parrot/lib/python3.10/site-packages/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from functools import reduce
3
+ from typing import Callable, Optional, Tuple, Union
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+
8
+ from .base_sparsifier import BaseSparsifier
9
+ import operator
10
+
11
+ __all__ = ["WeightNormSparsifier"]
12
+
13
def _flat_idx_to_2d(idx, shape):
    """Map a flattened index (scalar or tensor) to its (row, col) position in
    a 2-D layout described by ``shape``."""
    n_cols = shape[1]
    # `//` and `%` so this also works elementwise on index tensors.
    return idx // n_cols, idx % n_cols
17
+
18
class WeightNormSparsifier(BaseSparsifier):
    r"""Weight-Norm Sparsifier

    Computes the norm of every sparse block and zeroes out the blocks with the
    lowest norm; the sparsity level controls how many blocks are removed.

    Controlled by three hyperparameters:
    1. `sparsity_level` - the fraction of *sparse blocks* that are zeroed-out
    2. `sparse_block_shape` - the shape of the sparse blocks (blocks originate
       at the zero-index of the tensor)
    3. `zeros_per_block` - target number of zeros inside each sparse block,
       chosen as the smallest absolute values; defaults to the whole block.

    Args:
        sparsity_level: The target level of sparsity
        sparse_block_shape: The shape of a sparse block (see note below)
        zeros_per_block: Number of zeros in a sparse block
        norm: Norm to use. Could be either `int` or a callable.
            If `int`, only L1 and L2 are implemented.

    Note::
        The `sparse_block_shape` is tuple representing (block_ROWS, block_COLS),
        irrespective of what the rows / cols mean in the data tensor. For an
        nn.Linear weight of shape `(Cout, Cin)`, `block_ROWS` spans output
        channels and `block_COLS` spans input channels.

    Note::
        All constructor arguments are "defaults" and may be overridden by the
        configuration provided in the `prepare` step.
    """

    def __init__(self,
                 sparsity_level: float = 0.5,
                 sparse_block_shape: Tuple[int, int] = (1, 4),
                 zeros_per_block: Optional[int] = None,
                 norm: Optional[Union[Callable, int]] = None):
        if zeros_per_block is None:
            # Default: the entire block gets zeroed out.
            zeros_per_block = reduce(operator.mul, sparse_block_shape)
        if norm is None:
            norm = 2
        # Resolve the norm into an elementwise scoring function.
        if callable(norm):
            self.norm_fn = norm
        elif norm == 1:
            self.norm_fn = lambda T: T.abs()
        elif norm == 2:
            self.norm_fn = lambda T: T * T
        else:
            raise NotImplementedError(f"L-{norm} is not yet implemented.")
        super().__init__(defaults={
            "sparsity_level": sparsity_level,
            "sparse_block_shape": sparse_block_shape,
            "zeros_per_block": zeros_per_block,
        })

    def _scatter_fold_block_mask(self, output_shape, dim, indices, block_shape,
                                 mask=None, input_shape=None, device=None):
        r"""Creates patches of size `block_shape` after scattering the indices."""
        if mask is None:
            assert input_shape is not None
            mask = torch.ones(input_shape, device=device)
        # Zero the selected entries in the unfolded layout, then fold back
        # into the 2-D `output_shape` layout.
        mask.scatter_(dim=dim, index=indices, value=0)
        mask.data = F.fold(mask, output_size=output_shape,
                           kernel_size=block_shape, stride=block_shape)
        return mask

    def _make_tensor_mask(self, data, input_shape, sparsity_level, sparse_block_shape, mask=None):
        r"""Creates a tensor-level mask.

        The smallest zero/one patch of the resulting mask is a full
        ``sparse_block_shape`` block; ``sparsity_level`` is the fraction of
        blocks that are zeroed out.
        """
        rows, cols = data.shape[-2:]
        block_rows, block_cols = sparse_block_shape
        # Padding needed so the tensor tiles evenly into blocks.
        pad_rows = (block_rows - rows % block_rows) % block_rows
        pad_cols = (block_cols - cols % block_cols) % block_cols

        if mask is None:
            mask = torch.ones(rows + pad_rows, cols + pad_cols, device=data.device)

        # Degenerate levels short-circuit to all-zeros / all-ones.
        if sparsity_level >= 1.0:
            mask.data = torch.zeros_like(mask)
            return mask
        elif sparsity_level <= 0.0:
            mask.data = torch.ones_like(mask)
            return mask

        values_per_block = reduce(operator.mul, sparse_block_shape)
        if values_per_block > 1:
            # Reduce each block to its average so blocks can be ranked.
            data = F.avg_pool2d(
                data[None, None, :], kernel_size=sparse_block_shape,
                stride=sparse_block_shape, ceil_mode=True
            )
        data = data.flatten()
        num_blocks = len(data)

        data = data.repeat(1, values_per_block, 1)

        threshold_idx = int(round(sparsity_level * num_blocks))
        threshold_idx = max(0, min(num_blocks - 1, threshold_idx))  # sanity check
        _, sorted_idx = torch.topk(data, k=threshold_idx, dim=2, largest=False)

        # Temp reshape so the mask matches the (possibly reshaped) data layout.
        mask_reshape = mask.reshape(data.shape)
        self._scatter_fold_block_mask(
            dim=2, output_shape=(rows + pad_rows, cols + pad_cols),
            indices=sorted_idx, block_shape=sparse_block_shape, mask=mask_reshape
        )
        mask.data = mask_reshape.squeeze().reshape(mask.shape)[:rows, :cols].contiguous()
        return mask

    def _make_block_mask(self, data, sparse_block_shape, zeros_per_block, mask=None):
        r"""Creates a block-level mask.

        Sparsity is computed only *within* each ``sparse_block_shape`` patch:
        ``zeros_per_block`` elements of every block (the smallest values) are
        zeroed out.
        """
        rows, cols = data.shape[-2:]
        block_rows, block_cols = sparse_block_shape
        pad_rows = (block_rows - rows % block_rows) % block_rows
        pad_cols = (block_cols - cols % block_cols) % block_cols
        values_per_block = reduce(operator.mul, sparse_block_shape)

        if mask is None:
            mask = torch.ones((rows + pad_rows, cols + pad_cols), device=data.device)

        if values_per_block == zeros_per_block:
            # Everything should be sparsified
            mask.data = torch.zeros_like(mask)
            return mask

        # Pad data to the block grid; padding cells hold NaN (presumably to
        # keep them out of the smallest-k selection -- preserved as-is).
        padded_data = torch.ones(rows + pad_rows, cols + pad_cols,
                                 dtype=data.dtype, device=data.device)
        padded_data.fill_(torch.nan)
        padded_data[:rows, :cols] = data
        unfolded_data = F.unfold(padded_data[None, None, :],
                                 kernel_size=sparse_block_shape, stride=sparse_block_shape)

        # Temp reshape for mask
        mask_reshape = mask.reshape(unfolded_data.shape)
        _, sorted_idx = torch.topk(unfolded_data, k=zeros_per_block, dim=1, largest=False)

        self._scatter_fold_block_mask(
            dim=1, indices=sorted_idx, output_shape=padded_data.shape,
            block_shape=sparse_block_shape, mask=mask_reshape
        )

        mask.data = mask_reshape.squeeze().reshape(mask.shape).contiguous()
        return mask

    def update_mask(self, module, tensor_name, sparsity_level, sparse_block_shape,
                    zeros_per_block, **kwargs):
        """Recompute the mask for ``module.<tensor_name>`` by combining the
        tensor-level and (when partial) block-level masks."""
        values_per_block = reduce(operator.mul, sparse_block_shape)
        if zeros_per_block > values_per_block:
            raise ValueError(
                "Number of zeros per block cannot be more than the total number of elements in that block."
            )
        if zeros_per_block < 0:
            raise ValueError("Number of zeros per block should be positive.")

        mask = getattr(module.parametrizations, tensor_name)[0].mask
        if sparsity_level <= 0 or zeros_per_block == 0:
            # Fully dense.
            mask.data = torch.ones_like(mask)
        elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
            # Fully sparse.
            mask.data = torch.zeros_like(mask)
        else:
            ww = self.norm_fn(getattr(module, tensor_name))
            tensor_mask = self._make_tensor_mask(
                data=ww, input_shape=ww.shape, sparsity_level=sparsity_level,
                sparse_block_shape=sparse_block_shape
            )
            if values_per_block != zeros_per_block:
                block_mask = self._make_block_mask(
                    data=ww, sparse_block_shape=sparse_block_shape,
                    zeros_per_block=zeros_per_block
                )
                tensor_mask = torch.logical_or(tensor_mask, block_mask)
            mask.data = tensor_mask
parrot/lib/python3.10/site-packages/torch/ao/quantization/_equalize.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+ import copy
4
+ from typing import Dict, Any
5
+
6
+ __all__ = [
7
+ "set_module_weight",
8
+ "set_module_bias",
9
+ "get_module_weight",
10
+ "get_module_bias",
11
+ "max_over_ndim",
12
+ "min_over_ndim",
13
+ "channel_range",
14
+ "cross_layer_equalization",
15
+ "equalize",
16
+ "converged",
17
+ ]
18
+
19
+ _supported_types = {torch.nn.Conv2d, torch.nn.Linear}
20
+ _supported_intrinsic_types = {torch.ao.nn.intrinsic.ConvReLU2d, torch.ao.nn.intrinsic.LinearReLU}
21
+ _all_supported_types = _supported_types.union(_supported_intrinsic_types)
22
+
23
def set_module_weight(module, weight) -> None:
    """Replace the weight of ``module`` (or of its first child for fused/intrinsic modules)."""
    target = module if type(module) in _supported_types else module[0]
    target.weight = torch.nn.Parameter(weight)
28
+
29
def set_module_bias(module, bias) -> None:
    """Replace the bias of ``module`` (or of its first child for fused/intrinsic modules)."""
    target = module if type(module) in _supported_types else module[0]
    target.bias = torch.nn.Parameter(bias)
34
+
35
def get_module_weight(module):
    """Return the weight of ``module`` (or of its first child for fused/intrinsic modules)."""
    target = module if type(module) in _supported_types else module[0]
    return target.weight
40
+
41
def get_module_bias(module):
    """Return the bias of ``module`` (or of its first child for fused/intrinsic modules)."""
    target = module if type(module) in _supported_types else module[0]
    return target.bias
46
+
47
def max_over_ndim(input, axis_list, keepdim=False):
    """Apply 'torch.max' over the given axes.

    Args:
        input: tensor to reduce.
        axis_list: axes to reduce over; no longer modified by this call.
        keepdim: whether reduced axes are kept with size 1.

    Returns:
        ``input`` reduced with ``max`` over every axis in ``axis_list``.
    """
    # Reduce from the highest axis downward so earlier reductions do not shift
    # the remaining axis indices. Bug fix: iterate a sorted copy instead of
    # sorting `axis_list` in place, which mutated the caller's list.
    for axis in sorted(axis_list, reverse=True):
        input, _ = input.max(axis, keepdim)
    return input
53
+
54
def min_over_ndim(input, axis_list, keepdim=False):
    """Apply 'torch.min' over the given axes.

    Args:
        input: tensor to reduce.
        axis_list: axes to reduce over; no longer modified by this call.
        keepdim: whether reduced axes are kept with size 1.

    Returns:
        ``input`` reduced with ``min`` over every axis in ``axis_list``.
    """
    # Reduce from the highest axis downward so earlier reductions do not shift
    # the remaining axis indices. Bug fix: iterate a sorted copy instead of
    # sorting `axis_list` in place, which mutated the caller's list.
    for axis in sorted(axis_list, reverse=True):
        input, _ = input.min(axis, keepdim)
    return input
60
+
61
def channel_range(input, axis=0):
    """Find the per-channel range (max - min) of ``input`` along ``axis``."""
    # Reduce over every axis except the requested channel axis.
    reduce_axes = [d for d in range(input.ndim) if d != axis]
    mins = min_over_ndim(input, reduce_axes)
    maxs = max_over_ndim(input, reduce_axes)

    assert mins.size(0) == input.size(axis), "Dimensions of resultant channel range does not match size of requested axis"
    return maxs - mins
72
+
73
def cross_layer_equalization(module1, module2, output_axis=0, input_axis=1):
    """Scale the range of Tensor1.output to equal Tensor2.input.

    Rescales module1's output channels (and its bias) and module2's input
    channels so the per-channel weight ranges match, leaving the composed
    function unchanged up to the scaling.
    """
    if type(module1) not in _all_supported_types or type(module2) not in _all_supported_types:
        raise ValueError("module type not supported:", type(module1), " ", type(module2))

    weight1 = get_module_weight(module1)
    weight2 = get_module_weight(module2)

    if weight1.size(output_axis) != weight2.size(input_axis):
        raise TypeError("Number of output channels of first arg do not match \
number input channels of second arg")

    bias = get_module_bias(module1)

    weight1_range = channel_range(weight1, output_axis)
    weight2_range = channel_range(weight2, input_axis)

    # Small epsilon guards against division by zero for dead channels.
    weight2_range += 1e-9
    scaling_factors = torch.sqrt(weight1_range / weight2_range)
    inverse_scaling_factors = torch.reciprocal(scaling_factors)

    bias = bias * inverse_scaling_factors

    # Reshape the 1-D scale vectors so they broadcast along the right axis
    # of each weight tensor.
    shape1 = [1] * weight1.ndim
    shape1[output_axis] = weight1.size(output_axis)
    shape2 = [1] * weight2.ndim
    shape2[input_axis] = weight2.size(input_axis)

    scaling_factors = torch.reshape(scaling_factors, shape2)
    inverse_scaling_factors = torch.reshape(inverse_scaling_factors, shape1)

    weight1 = weight1 * inverse_scaling_factors
    weight2 = weight2 * scaling_factors

    set_module_weight(module1, weight1)
    set_module_bias(module1, bias)
    set_module_weight(module2, weight2)
118
+
119
def equalize(model, paired_modules_list, threshold=1e-4, inplace=True):
    """Equalize modules until convergence is achieved.

    Repeatedly applies :func:`cross_layer_equalization` to every pair in
    ``paired_modules_list`` until :func:`converged` reports that the tracked
    weights changed by less than ``threshold`` between iterations.
    Implementation references section 4.1 of
    https://arxiv.org/pdf/1906.04721.pdf

    Args:
        model: a model (nn.module) that equalization is to be applied on
        paired_modules_list: list of pairs of submodule names; the two modules
            of a pair should generally be adjacent in the model
        threshold: similarity degree required by ``converged`` to stop
        inplace: determines if function is inplace or not
    """
    if not inplace:
        model = copy.deepcopy(model)

    names_of_interest = {name for pair in paired_modules_list for name in pair}
    name_to_module: Dict[str, torch.nn.Module] = {}
    previous_name_to_module: Dict[str, Any] = {}
    for name, module in model.named_modules():
        if name in names_of_interest:
            name_to_module[name] = module
            # None marks "no previous iteration yet" for converged().
            previous_name_to_module[name] = None

    while not converged(name_to_module, previous_name_to_module, threshold):
        for pair in paired_modules_list:
            # Snapshot before equalizing so convergence can be measured.
            previous_name_to_module[pair[0]] = copy.deepcopy(name_to_module[pair[0]])
            previous_name_to_module[pair[1]] = copy.deepcopy(name_to_module[pair[1]])
            cross_layer_equalization(name_to_module[pair[0]], name_to_module[pair[1]])

    return model
159
+
160
def converged(curr_modules, prev_modules, threshold=1e-4):
    """Test whether modules are converged to a specified threshold.

    Both arguments map module names to modules and must share the same key
    set. Returns True when the summed norms of the per-module weight
    differences fall below ``threshold``; returns False on the first
    iteration (any ``None`` in ``prev_modules``).
    """
    if curr_modules.keys() != prev_modules.keys():
        raise ValueError("The keys to the given mappings must have the same set of names of modules")

    if None in prev_modules.values():
        # Nothing to compare against yet.
        return False

    total = torch.tensor(0.)
    for name in curr_modules:
        delta = get_module_weight(curr_modules[name]).sub(
            get_module_weight(prev_modules[name])
        )
        total += torch.norm(delta)
    return bool(total < threshold)
parrot/lib/python3.10/site-packages/torch/ao/quantization/fuse_modules.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import copy
3
+
4
+ import torch.nn as nn
5
+
6
+ from torch.ao.quantization.fuser_method_mappings import get_fuser_method
7
+ # for backward compatibility
8
+ from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401
9
+ from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401
10
+ from torch.nn.utils.parametrize import type_before_parametrizations
11
+
12
+ from typing import List, Optional
13
+
14
+ __all__ = [
15
+ "fuse_known_modules",
16
+ "fuse_modules",
17
+ "fuse_modules_qat",
18
+ ]
19
+
20
+ # Generalization of getattr
21
def _get_module(model, submodule_key):
    """Resolve a dotted submodule path on ``model`` — a generalization of getattr."""
    target = model
    for attr in submodule_key.split('.'):
        target = getattr(target, attr)
    return target
27
+
28
+ # Generalization of setattr
29
def _set_module(model, submodule_key, module):
    """Assign ``module`` at a dotted submodule path on ``model`` — a generalization of setattr."""
    *parents, leaf = submodule_key.split('.')
    target = model
    for attr in parents:
        target = getattr(target, attr)
    setattr(target, leaf, module)
37
+
38
def fuse_known_modules(mod_list, is_qat, additional_fuser_method_mapping=None):
    r"""Return a list of known fuse modules.

    Returns a list of modules that fuses the operations specified
    in the input module list.

    Fuses only the following sequence of modules:
    conv, bn
    conv, bn, relu
    conv, relu
    linear, bn
    linear, relu
    For these sequences, the first element in the output module list performs
    the fused operation. The rest of the elements are set to nn.Identity()
    """
    types = tuple(type_before_parametrizations(m) for m in mod_list)
    fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
    if fuser_method is None:
        raise NotImplementedError(f"Cannot fuse modules: {types}")

    fused = fuser_method(is_qat, *mod_list)

    # NOTE: forward hooks not processed in the two following loops will be lost after the fusion
    first, last = mod_list[0], mod_list[-1]
    # Move pre-forward hooks of the first module onto the fused module.
    for pre_hook_fn in first._forward_pre_hooks.values():
        fused.register_forward_pre_hook(pre_hook_fn)
    first._forward_pre_hooks.clear()
    # Move post-forward hooks of the last module onto the fused module.
    for hook_fn in last._forward_hooks.values():
        fused.register_forward_hook(hook_fn)
    last._forward_hooks.clear()

    # The fused op replaces the head of the sequence; the tail collapses to
    # Identity placeholders that keep the original training flag.
    new_mod: List[Optional[nn.Module]] = [fused]
    for _ in range(1, len(mod_list)):
        identity = nn.Identity()
        identity.training = first.training
        new_mod.append(identity)

    return new_mod
76
+
77
def _fuse_modules_helper(model, modules_to_fuse, is_qat, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    """Fuse one named sequence of submodules of ``model`` in place."""
    if fuse_custom_config_dict is None:
        fuse_custom_config_dict = {}
    additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})

    # Resolve names to module objects, fuse them, then write the results back
    # to the same attribute paths.
    mod_list = [_get_module(model, name) for name in modules_to_fuse]
    new_mod_list = fuser_func(mod_list, is_qat, additional_fuser_method_mapping)
    for name, replacement in zip(modules_to_fuse, new_mod_list):
        _set_module(model, name, replacement)
91
+
92
def _fuse_modules(model, modules_to_fuse, is_qat, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    """Shared implementation behind ``fuse_modules`` / ``fuse_modules_qat``."""
    target = model if inplace else copy.deepcopy(model)

    # ``modules_to_fuse`` may be a single flat list of names or a list of
    # such lists; normalize to a list of groups and fuse each group.
    is_flat = all(isinstance(entry, str) for entry in modules_to_fuse)
    groups = [modules_to_fuse] if is_flat else modules_to_fuse
    for group in groups:
        _fuse_modules_helper(target, group, is_qat, fuser_func, fuse_custom_config_dict)
    return target
104
+
105
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    r"""Fuse a list of modules into a single module.

    Fuses only the following sequence of modules:
    conv, bn
    conv, bn, relu
    conv, relu
    linear, relu
    bn, relu
    All other sequences are left unchanged.
    For these sequences, replaces the first item in the list
    with the fused module, replacing the rest of the modules
    with identity.

    Args:
        model: Model containing the modules to be fused
        modules_to_fuse: list of list of module names to fuse. Can also be a list
                         of strings if there is only a single list of modules to fuse.
        inplace: bool specifying if fusion happens in place on the model, by default
                 a new model is returned
        fuser_func: Function that takes in a list of modules and outputs a list of fused modules
                    of the same length. For example,
                    fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
                    Defaults to torch.ao.quantization.fuse_known_modules
        `fuse_custom_config_dict`: custom configuration for fusion

    .. code-block:: python

       # Example of fuse_custom_config_dict
       fuse_custom_config_dict = {
           # Additional fuser_method mapping
           "additional_fuser_method_mapping": {
               (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn
           },
       }

    Returns:
        model with fused modules. A new copy is created if `inplace` is False.

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = M().eval()
        >>> # m is a module containing the sub-modules below
        >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
        >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
        >>> output = fused_m(input)

        >>> m = M().eval()
        >>> # Alternately provide a single list of modules to fuse
        >>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
        >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
        >>> output = fused_m(input)

    """
    return _fuse_modules(
        model,
        modules_to_fuse,
        is_qat=False,
        inplace=inplace,
        fuser_func=fuser_func,
        fuse_custom_config_dict=fuse_custom_config_dict)
167
+
168
def fuse_modules_qat(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
    """QAT variant of :func:`fuse_modules`: identical behavior with ``is_qat=True``."""
    return _fuse_modules(
        model, modules_to_fuse, is_qat=True, inplace=inplace,
        fuser_func=fuser_func, fuse_custom_config_dict=fuse_custom_config_dict)
parrot/lib/python3.10/site-packages/torch/ao/quantization/qconfig.py ADDED
@@ -0,0 +1,569 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from collections import namedtuple
3
+ from typing import Optional, Any, Union, Type
4
+ from typing_extensions import deprecated
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from torch.ao.quantization.fake_quantize import (
9
+ FakeQuantize,
10
+ FakeQuantizeBase,
11
+ default_fake_quant,
12
+ default_dynamic_fake_quant,
13
+ default_per_channel_weight_fake_quant,
14
+ default_weight_fake_quant,
15
+ default_fused_act_fake_quant,
16
+ default_fused_wt_fake_quant,
17
+ FusedMovingAvgObsFakeQuantize,
18
+ default_fused_per_channel_wt_fake_quant,
19
+ default_embedding_fake_quant,
20
+ default_embedding_fake_quant_4bit,
21
+ fused_wt_fake_quant_range_neg_127_to_127,
22
+ fused_per_channel_wt_fake_quant_range_neg_127_to_127,
23
+ )
24
+
25
+ from .observer import (
26
+ _PartialWrapper,
27
+ MinMaxObserver,
28
+ HistogramObserver,
29
+ MovingAverageMinMaxObserver,
30
+ NoopObserver,
31
+ PlaceholderObserver,
32
+ ReuseInputObserver,
33
+ default_debug_observer,
34
+ default_dynamic_quant_observer,
35
+ default_float_qparams_observer,
36
+ default_float_qparams_observer_4bit,
37
+ default_observer,
38
+ default_per_channel_weight_observer,
39
+ default_placeholder_observer,
40
+ default_weight_observer,
41
+ weight_observer_range_neg_127_to_127,
42
+ per_channel_weight_observer_range_neg_127_to_127,
43
+ default_reuse_input_observer,
44
+ ObserverBase,
45
+ )
46
+ import warnings
47
+ import copy
48
+
49
+ __all__ = [
50
+ "QConfig",
51
+ # TODO: deprecated, remove
52
+ "QConfigDynamic",
53
+ "default_qconfig",
54
+ "default_debug_qconfig",
55
+ "default_per_channel_qconfig",
56
+ "default_dynamic_qconfig",
57
+ "float16_dynamic_qconfig",
58
+ "float16_static_qconfig",
59
+ "per_channel_dynamic_qconfig",
60
+ "float_qparams_weight_only_qconfig",
61
+ "float_qparams_weight_only_qconfig_4bit",
62
+ "default_quint8_weight_qconfig",
63
+ "default_qat_qconfig",
64
+ "default_dynamic_qat_qconfig",
65
+ "default_weight_only_qconfig",
66
+ "default_activation_only_qconfig",
67
+ "default_qat_qconfig_v2",
68
+ "default_reuse_input_qconfig",
69
+ "default_symmetric_qnnpack_qconfig",
70
+ "default_per_channel_symmetric_qnnpack_qconfig",
71
+ "default_symmetric_qnnpack_qat_qconfig",
72
+ "default_per_channel_symmetric_qnnpack_qat_qconfig",
73
+ "default_embedding_qat_qconfig",
74
+ "default_embedding_qat_qconfig_4bit",
75
+ "get_default_qconfig",
76
+ "get_default_qat_qconfig",
77
+ "get_default_qconfig_dict",
78
+ "get_default_qat_qconfig_dict",
79
+ "QConfigAny",
80
+ "qconfig_equals",
81
+
82
+ ]
83
+
84
class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
    """
    Describes how to quantize a layer or a part of the network by providing
    settings (observer classes) for activations and weights respectively.

    Note that QConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    Quantization preparation function will instantiate observers multiple times for each of the layers.

    Observer classes have usually reasonable default arguments, but they can be overwritten with `with_args`
    method (that behaves like functools.partial)::

      my_qconfig = QConfig(
          activation=MinMaxObserver.with_args(dtype=torch.qint8),
          weight=default_observer.with_args(dtype=torch.qint8))

    """
    def __new__(cls, activation, weight):
        # Guard against the common mistake of passing observer *instances*
        # instead of classes / factories.
        for field in (activation, weight):
            if isinstance(field, nn.Module):
                raise ValueError("QConfig received observer instance, please pass observer class instead. " +
                                 "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super().__new__(cls, activation, weight)
109
+
110
+
111
@deprecated(
    "`QConfigDynamic` is going to be deprecated in PyTorch 1.12, please use `QConfig` instead",
    category=FutureWarning,
)
class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
    """
    Describes how to dynamically quantize a layer or a part of the network by providing
    settings (observer classes) for weights.

    It's like QConfig, but for dynamic quantization.

    Note that QConfigDynamic needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    Quantization function will instantiate observers multiple times for each of the layers.

    Observer classes have usually reasonable default arguments, but they can be overwritten with `with_args`
    method (that behaves like functools.partial)::

      my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))
    """
    def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):
        # catch common mistakes: only the weight observer matters for dynamic
        # quantization, so only `weight` is validated here (the activation
        # default is a no-op Identity).
        if isinstance(weight, nn.Module):
            raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super().__new__(cls, activation, weight)
137
+
138
+
139
+ default_qconfig = QConfig(activation=default_observer,
140
+ weight=default_weight_observer)
141
+ """
142
+ Default qconfig configuration.
143
+ """
144
+
145
+ default_debug_qconfig = QConfig(weight=default_weight_observer,
146
+ activation=default_debug_observer)
147
+ """
148
+ Default qconfig configuration for debugging.
149
+ """
150
+
151
+ default_per_channel_qconfig = QConfig(activation=default_observer,
152
+ weight=default_per_channel_weight_observer)
153
+ """
154
+ Default qconfig configuration for per channel weight quantization.
155
+ """
156
+
157
+ default_dynamic_qconfig = QConfig(activation=default_dynamic_quant_observer,
158
+ weight=default_weight_observer)
159
+ """
160
+ Default dynamic qconfig.
161
+ """
162
+
163
+ float16_dynamic_qconfig = QConfig(activation=PlaceholderObserver.with_args(dtype=torch.float16, is_dynamic=True),
164
+ weight=PlaceholderObserver.with_args(dtype=torch.float16))
165
+ """
166
+ Dynamic qconfig with weights quantized to `torch.float16`.
167
+ """
168
+
169
+ float16_static_qconfig = QConfig(activation=PlaceholderObserver.with_args(dtype=torch.float16),
170
+ weight=PlaceholderObserver.with_args(dtype=torch.float16))
171
+ """
172
+ Dynamic qconfig with both activations and weights quantized to `torch.float16`.
173
+ """
174
+
175
+ per_channel_dynamic_qconfig = QConfig(activation=default_dynamic_quant_observer,
176
+ weight=default_per_channel_weight_observer)
177
+ """
178
+ Dynamic qconfig with weights quantized per channel.
179
+ """
180
+
181
+ float_qparams_weight_only_qconfig = QConfig(
182
+ activation=default_placeholder_observer,
183
+ weight=default_float_qparams_observer)
184
+ """
185
+ Dynamic qconfig with weights quantized with a floating point zero_point.
186
+ """
187
+
188
+ float_qparams_weight_only_qconfig_4bit = QConfig(
189
+ activation=default_placeholder_observer,
190
+ weight=default_float_qparams_observer_4bit)
191
+
192
+ default_qat_qconfig = QConfig(activation=default_fake_quant,
193
+ weight=default_weight_fake_quant)
194
+ """
195
+ Default qconfig for QAT.
196
+ """
197
+
198
+ default_dynamic_qat_qconfig = QConfig(activation=default_dynamic_fake_quant,
199
+ weight=default_weight_fake_quant)
200
+ """
201
+ Default qconfig for dynamic QAT.
202
+ """
203
+
204
+ default_weight_only_qconfig = QConfig(activation=torch.nn.Identity,
205
+ weight=default_weight_fake_quant)
206
+ """
207
+ Default qconfig for quantizing weights only.
208
+ """
209
+
210
+ default_activation_only_qconfig = QConfig(activation=default_fake_quant,
211
+ weight=torch.nn.Identity)
212
+ """
213
+ Default qconfig for quantizing activations only.
214
+ """
215
+
216
+ # QAT config that uses a fused observer + fake quant modules for optimized training performance.
217
+ # to modify the activation/weight observers, the default entries in fake_quantize.py can be modified.
218
+ default_qat_qconfig_v2 = QConfig(activation=default_fused_act_fake_quant, weight=default_fused_wt_fake_quant)
219
+ """
220
+ Fused version of `default_qat_config`, has performance benefits.
221
+ """
222
+
223
+ default_reuse_input_qconfig = QConfig(activation=default_reuse_input_observer,
224
+ weight=NoopObserver)
225
+ """
226
+ Default qconfig for operators that reuse the observers from input Tensor, e.g. reshape
227
+ """
228
+
229
def get_default_qconfig(backend='x86', version=0):
    """
    Returns the default PTQ qconfig for the specified backend.

    Args:
      * `backend` (str): a string representing the target backend. Currently supports
        `x86` (default), `fbgemm`, `qnnpack` and `onednn`.

    Return:
        qconfig
    """
    supported_backends = ["fbgemm", "x86", "qnnpack", "onednn"]
    if backend not in supported_backends:
        raise AssertionError(
            "backend: " + str(backend) +
            f" not supported. backend must be one of {supported_backends}"
        )
    if version != 0:
        raise AssertionError("Version number: " + str(version) +
                             " in get_default_qconfig is not supported. Version number must be 0")

    if backend in ('fbgemm', 'x86'):
        return QConfig(activation=HistogramObserver.with_args(reduce_range=True),
                       weight=default_per_channel_weight_observer)
    if backend == 'qnnpack':
        # TODO: make this compatible with xnnpack constraints
        return QConfig(activation=HistogramObserver.with_args(reduce_range=False),
                       weight=default_weight_observer)
    # backend == 'onednn'
    if not torch.cpu._is_cpu_support_vnni():
        warnings.warn(
            "Default qconfig of oneDNN backend with reduce_range of false may have accuracy issues "
            "on CPU without Vector Neural Network Instruction support.")
    return QConfig(activation=HistogramObserver.with_args(reduce_range=False),
                   weight=default_per_channel_weight_observer)
273
+
274
+ """
275
+ Default, symmetric PTQ qconfig for the specified backend. And a per_channel
276
+ variant of the same.
277
+
278
+ Symmetric here applies to signed weights with zero point = 0, and additional
279
+ value restrictions. The activations are also signed 8-bit integers with this
280
+ qconfig.
281
+
282
+ * Once this change is merged [as of 3/17/22], with backend or qengine =
283
+ 'qnnpack', some quantized operators with this symmetric qconfig may use
284
+ operators from xnnpack library.
285
+
286
+ ** Support to use xnnpack ops with `qnnpack` backed for asymmetric
287
+ qconfig (returned by get_default_qconfig()) is not available yet.
288
+
289
+ * This qconfig uses signed activations and weights. Weights have added
290
+ restrictions such as zero point is forced to be 0, making the weights
291
+ symmetric, hence the name. And the 8-bit quantized values are
292
+ restricting to to [-127, +127], excluding -128.
293
+
294
+ * xnnpack has a requantization scale value restriction, 0x1p-32 <=
295
+ requantization_scale < 256.0 where, `requantization_scale = (input_scale
296
+ * kernel_scale) / (output_scale)`. Using this eps (w/ assumed max value
297
+ of 256) is to prevent requantization_scale to go below xnnpack lower
298
+ threshold.
299
+ """
300
+ default_symmetric_qnnpack_qconfig = QConfig(activation=HistogramObserver.with_args(dtype=torch.qint8,
301
+ reduce_range=False,
302
+ eps=2 ** -12),
303
+ weight=weight_observer_range_neg_127_to_127)
304
+
305
+ default_per_channel_symmetric_qnnpack_qconfig = QConfig(activation=HistogramObserver.with_args(dtype=torch.qint8,
306
+ reduce_range=False,
307
+ eps=2 ** -12),
308
+ weight=per_channel_weight_observer_range_neg_127_to_127)
309
+
310
+ default_embedding_qat_qconfig = QConfig(activation=NoopObserver.with_args(dtype=torch.float32),
311
+ weight=default_embedding_fake_quant)
312
+
313
+ default_embedding_qat_qconfig_4bit = QConfig(activation=NoopObserver.with_args(dtype=torch.float32),
314
+ weight=default_embedding_fake_quant_4bit)
315
+
316
+ default_quint8_weight_qconfig = QConfig(activation=HistogramObserver, weight=MinMaxObserver)
317
+
318
def get_default_qat_qconfig(backend='x86', version=1):
    """
    Returns the default QAT qconfig for the specified backend.

    Args:
      * `backend` (str): a string representing the target backend. Currently supports
        `x86` (default), `fbgemm`, `qnnpack` and `onednn`.
      * `version`: version, for backwards compatibility. Can be `None` or `1`.

    Return:
        qconfig

    Raises:
        AssertionError: if `backend` or `version` is unsupported.
    """
    supported_backends = ["fbgemm", "x86", "qnnpack", "onednn"]
    if backend not in supported_backends:
        raise AssertionError(
            "backend: " + str(backend) +
            f" not supported. backend must be one of {supported_backends}"
        )

    # Histogram observer is too slow for quantization aware training
    if version == 0:
        if backend == 'fbgemm':
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255,
                                                                reduce_range=True),
                              weight=default_per_channel_weight_fake_quant)
        elif backend == 'qnnpack':
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255,
                                                                reduce_range=False),
                              weight=default_weight_fake_quant)
        elif backend == 'onednn':
            # NOTE(review): onednn intentionally omits reduce_range here (defaults
            # to False) while fbgemm/x86 use True — confirm against backend docs.
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255),
                              weight=default_per_channel_weight_fake_quant)
        elif backend == 'x86':
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255,
                                                                reduce_range=True),
                              weight=default_per_channel_weight_fake_quant)
        else:
            qconfig = default_qat_qconfig
    # Use the fused observe + fake_quant modules for doing QAT.
    elif version == 1:
        if backend == 'fbgemm':
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255,
                                                                                 reduce_range=True),
                              weight=default_fused_per_channel_wt_fake_quant)
        elif backend == 'qnnpack':
            # TODO: make this compatible with xnnpack constraints
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255,
                                                                                 reduce_range=False),
                              weight=default_fused_wt_fake_quant)
        elif backend == 'onednn':
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255),
                              weight=default_fused_per_channel_wt_fake_quant)
        elif backend == 'x86':
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255,
                                                                                 reduce_range=True),
                              weight=default_fused_per_channel_wt_fake_quant)
        else:
            qconfig = default_qat_qconfig_v2
    else:
        # BUGFIX: original message concatenated str(version) + "in ..." with no
        # separating space, producing e.g. "Version number: 2in get_default_qat_qconfig".
        raise AssertionError("Version number: " + str(version) +
                             " in get_default_qat_qconfig is not supported. Version number must be 0 or 1")

    return qconfig
397
+
398
+ """
399
+ Default symmetric QAT qconfig for qnnpack. And its per channel weight variant.
400
+ """
401
+ default_symmetric_qnnpack_qat_qconfig = QConfig(
402
+ activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
403
+ quant_min=-128,
404
+ quant_max=127,
405
+ dtype=torch.qint8,
406
+ reduce_range=False,
407
+ eps=2 ** -12),
408
+ weight=fused_wt_fake_quant_range_neg_127_to_127)
409
+
410
+ default_per_channel_symmetric_qnnpack_qat_qconfig = QConfig(
411
+ activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
412
+ quant_min=-128,
413
+ quant_max=127,
414
+ dtype=torch.qint8,
415
+ reduce_range=False,
416
+ eps=2 ** -12),
417
+ weight=fused_per_channel_wt_fake_quant_range_neg_127_to_127)
418
+
419
+ _default_fp32_placeholder_qconfig = QConfig(
420
+ activation=PlaceholderObserver.with_args(dtype=torch.float32),
421
+ weight=PlaceholderObserver.with_args(dtype=torch.float32)
422
+ )
423
+
424
+ _default_quint8_placeholder_qconfig = QConfig(
425
+ activation=PlaceholderObserver.with_args(dtype=torch.quint8),
426
+ # operators using this qconfig doesn't have weights
427
+ weight=None,
428
+ )
429
+
430
@deprecated(
    "`torch.ao.quantization.get_default_qconfig_dict` is deprecated and will be removed in "
    "a future version. Please use `torch.ao.quantization.get_default_qconfig_mapping` instead.",
    category=FutureWarning,
)
def get_default_qconfig_dict(backend='x86', version=0):
    # Deprecated shim: build the modern QConfigMapping and flatten it to a dict.
    return torch.ao.quantization.get_default_qconfig_mapping(backend, version).to_dict()

@deprecated(
    "`torch.ao.quantization.get_default_qat_qconfig_dict` is deprecated and will be removed in "
    "a future version. Please use `torch.ao.quantization.get_default_qat_qconfig_mapping` instead.",
    category=FutureWarning,
)
def get_default_qat_qconfig_dict(backend='x86', version=1):
    # Deprecated shim: QAT counterpart of get_default_qconfig_dict.
    return torch.ao.quantization.get_default_qat_qconfig_mapping(backend, version).to_dict()
445
+
446
def _assert_valid_qconfig(qconfig: Optional[QConfig],
                          mod: torch.nn.Module) -> None:
    """
    Verifies that this `qconfig` is valid.

    Currently the only check is that a ConvTranspose module is not paired
    with a per-channel weight observer, which is unsupported for it.
    NOTE: validation failure raises AssertionError (via `assert`), which is
    stripped under `python -O`.
    """
    if qconfig is None:
        return
    is_conv_transpose_mod = (
        isinstance(mod, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)))
    if is_conv_transpose_mod:
        if qconfig.weight is None:
            # for now, we assume that any qconfig for ConvTranspose without a weight is valid
            return
        # Instantiate the weight observer factory to inspect its concrete type.
        example_observer = qconfig.weight()
        is_per_channel = (
            isinstance(example_observer, (torch.ao.quantization.PerChannelMinMaxObserver,
                                          torch.ao.quantization.MovingAveragePerChannelMinMaxObserver))
        )
        assert not is_per_channel, \
            'Per channel weight observer is not supported yet for ConvTranspose{n}d.'
466
+
467
+ QConfigAny = Optional[QConfig]
468
+ QConfigAny.__module__ = "torch.ao.quantization.qconfig"
469
+
470
def _add_module_to_qconfig_obs_ctr(
        qconfig: QConfigAny,
        module: Optional[nn.Module]) -> Any:
    r"""This is a helper function for use in quantization prepare that updates a qconfig so that
    the constructors stored in the qconfig will create observers on the same device that
    'module' is on. This is intended to be used when the qconfigs are propagated to each
    module in order to avoid potential device alignment issues.

    Args:
        qconfig: QConfig with obs constructors stored in activation and weight
        module: module which the qconfig is related to

    Return:
        qconfig: configured so that obs constructors set to construct on the same device as module
    """

    # Only QConfig-shaped namedtuples (fields exactly ('activation', 'weight'))
    # are rewritten; anything else is returned untouched.
    if module is None or qconfig is None or qconfig._fields != ('activation', 'weight'):
        return qconfig

    def get_factory_kwargs_based_on_module_device():
        # Evaluated lazily at observer-construction time, so it sees the
        # module's device at that moment, not at qconfig-propagation time.
        assert isinstance(module, torch.nn.Module)
        devices = {p.device for p in module.parameters()} | \
            {p.device for p in module.buffers()}
        # NOTE(review): if params/buffers span multiple devices this picks an
        # arbitrary one — confirm callers guarantee a single device.
        device = next(iter(devices)) if len(devices) > 0 else None
        return None if device is None else {'device': device}

    def configure_constructor_to_put_obs_on_module_device(original_constructor):
        try:
            # check if constructor can accept factory_kwargs
            check = original_constructor.with_args(factory_kwargs=None)
            check()
            return original_constructor.with_callable_args(factory_kwargs=get_factory_kwargs_based_on_module_device)
        except AttributeError:  # qconfig doesn't have activation or weight
            return original_constructor
        except TypeError:  # the class doesn't accept factory_kwargs argument
            return original_constructor

    activation = configure_constructor_to_put_obs_on_module_device(qconfig.activation)
    weight = configure_constructor_to_put_obs_on_module_device(qconfig.weight)

    return QConfig(activation, weight)
511
+
512
+ _ObserverOrFakeQuantizeConstructor = Union[_PartialWrapper, Type[ObserverBase], Type[FakeQuantizeBase]]
513
+
514
def _obs_or_fq_ctr_equals(obs_or_fq1: _ObserverOrFakeQuantizeConstructor, obs_or_fq2: _ObserverOrFakeQuantizeConstructor):
    """Compare two observer/fake-quant constructors, unwrapping partial wrappers."""
    both_wrapped = isinstance(obs_or_fq1, _PartialWrapper) and isinstance(obs_or_fq2, _PartialWrapper)
    if both_wrapped:
        return _partial_wrapper_equals(obs_or_fq1, obs_or_fq2)
    return obs_or_fq1 == obs_or_fq2
518
+
519
def _partial_wrapper_equals(obs_or_fq1: _PartialWrapper, obs_or_fq2: _PartialWrapper):
    """
    Return whether the two partial wrappers are equal,
    """
    # functools.partial has no __eq__ operator defined so '==' defaults to 'is';
    # compare func/args/keywords field by field instead, giving the "observer"
    # keyword special treatment (it may itself be a partial wrapper).
    kw1 = copy.copy(obs_or_fq1.p.keywords)
    kw2 = copy.copy(obs_or_fq2.p.keywords)
    keywords_equal = True
    if "observer" in kw1 and "observer" in kw2:
        keywords_equal = _obs_or_fq_ctr_equals(kw1.pop("observer"), kw2.pop("observer"))
    keywords_equal = keywords_equal and kw1 == kw2
    return obs_or_fq1.p.func == obs_or_fq2.p.func and obs_or_fq1.p.args == obs_or_fq2.p.args and keywords_equal
534
+
535
def qconfig_equals(q1: QConfigAny, q2: QConfigAny):
    """
    Returns `True` if `q1` equals `q2`, and `False` otherwise.
    """
    if q1 is None or q2 is None:
        return q1 == q2
    try:
        # Qconfig weight and activation can be either a partial wrapper,
        # or an observer class. Special handling is required (above) for
        # comparing partial wrappers.
        return (_obs_or_fq_ctr_equals(q1.activation, q2.activation)
                and _obs_or_fq_ctr_equals(q1.weight, q2.weight))
    except AttributeError:
        return q1 == q2
552
+
553
def _activation_is_memoryless(qconfig: QConfig):
    """
    Return whether the observer for activations defined in the given QConfig is memoryless.
    This means a MovingAverage observer with averaging constant equal to 1.
    """
    def _is_memoryless(observer):
        # averaging_constant == 1 means every forward pass fully overwrites
        # the running statistics, i.e. no history is kept.
        return hasattr(observer, "averaging_constant") and observer.averaging_constant == 1
    # Instantiate the activation factory so the concrete object can be inspected.
    act = qconfig.activation()
    if isinstance(act, FakeQuantizeBase) and hasattr(act, "activation_post_process"):
        # For fake-quant wrappers the actual observer lives one level down.
        return _is_memoryless(act.activation_post_process)
    else:
        return _is_memoryless(act)
565
+
566
def _is_reuse_input_qconfig(qconfig: Optional[QConfig]):
    """Return True when ``qconfig`` marks an op that reuses its input's observer (e.g. reshape)."""
    if qconfig is None:
        return False
    return (isinstance(qconfig.activation(), ReuseInputObserver)
            and isinstance(qconfig.weight(), NoopObserver))
parrot/lib/python3.10/site-packages/torch/ao/quantization/stubs.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ from torch import nn
4
+
5
class QuantStub(nn.Module):
    r"""Placeholder for a quantize op.

    Before calibration this behaves like an observer (identity on data);
    `convert` swaps it for `nnq.Quantize`, which performs the actual
    quantization.

    Args:
        qconfig: quantization configuration for the tensor; when omitted,
            the qconfig is looked up on the parent modules.
    """

    def __init__(self, qconfig=None):
        super().__init__()
        # Only record an explicitly provided qconfig; otherwise the
        # quantization tooling inherits one from an ancestor module.
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x):
        # Identity until `convert` replaces this module.
        return x
20
+
21
+
22
class DeQuantStub(nn.Module):
    r"""Placeholder for a dequantize op.

    Before calibration this is simply the identity; `convert` swaps it for
    `nnq.DeQuantize`, which performs the actual dequantization.

    Args:
        qconfig: quantization configuration for the tensor; when omitted,
            the qconfig is looked up on the parent modules.
    """

    def __init__(self, qconfig=None):
        super().__init__()
        # Only record an explicitly provided qconfig; otherwise the
        # quantization tooling inherits one from an ancestor module.
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x):
        # Identity until `convert` replaces this module.
        return x
37
+
38
+
39
class QuantWrapper(nn.Module):
    r"""Wraps `module`, routing its input through a QuantStub and its output
    through a DeQuantStub.

    The `quantization` utility functions use this to attach the quant and
    dequant modules.  Before `convert`, `QuantStub` merely observes the input
    tensor; after `convert` it is swapped for `nnq.Quantize`, which performs
    the real quantization.  Similarly for `DeQuantStub`.
    """
    quant: QuantStub
    dequant: DeQuantStub
    module: nn.Module

    def __init__(self, module):
        super().__init__()
        # Propagate the wrapped module's qconfig (if any) to both stubs.
        qconfig = getattr(module, "qconfig", None)
        self.add_module('quant', QuantStub(qconfig))
        self.add_module('dequant', DeQuantStub(qconfig))
        self.add_module('module', module)
        # Keep train/eval mode in sync with the wrapped module.
        self.train(module.training)

    def forward(self, X):
        return self.dequant(self.module(self.quant(X)))
videochat2/lib/python3.10/site-packages/tensorflow/python/framework/__pycache__/ops.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2093b0830151beaa118c5b910280286ea165a391c3f4ff76f0e28f40e485a0e
3
+ size 190757
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/array_ops_stack.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ # Tests for this file live in python/kernel_tests/array_ops_test.py
16
+ """Operations to stack and unstack tensors."""
17
+
18
+ from tensorflow.python.framework import ops
19
+ from tensorflow.python.ops import gen_array_ops
20
+ from tensorflow.python.util import dispatch
21
+ from tensorflow.python.util.tf_export import tf_export
22
+
23
+
24
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  See also `tf.concat`, `tf.tile`, `tf.repeat`.

  Packs the tensors in `values` along a new `axis` dimension.  Given `N`
  tensors of shape `(A, B, C)`, the result has shape `(N, A, B, C)` for
  `axis == 0`, `(A, N, B, C)` for `axis == 1`, and so on.  This is the
  inverse of `tf.unstack`; the numpy equivalent is `np.stack`.

  >>> x = tf.constant([1, 4])
  >>> y = tf.constant([2, 5])
  >>> z = tf.constant([3, 6])
  >>> tf.stack([x, y, z]).shape.as_list()
  [3, 2]
  >>> tf.stack([x, y, z], axis=1).shape.as_list()
  [2, 3]

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range [-(R+1), R+1).
  """
  if axis == 0:
    # A fully constant list stacked along axis 0 folds directly into a
    # single constant op.
    try:
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError, NotImplementedError):
      # Not fully constant; fall through to the generic pack op.
      pass

  # When the element shape is statically known, validate `axis` eagerly so
  # the error surfaces at graph-construction time.
  elem_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple()  # pylint: disable=protected-access
  if elem_shape is not None:
    out_rank = len(elem_shape) + 1
    if not -out_rank <= axis < out_rank:
      raise ValueError(f"Argument `axis` = {axis} not in range "
                       f"[{-out_rank}, {out_rank})")

  return gen_array_ops.pack(values, axis=axis, name=name)
86
+
87
+
88
@tf_export("unstack")
@dispatch.add_dispatch_support
def unstack(value, num=None, axis=0, name="unstack"):
  """Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.

  Chips `value` along `axis`, returning one tensor per entry of that
  dimension; each result has the input shape with `axis` removed.  This is
  the inverse of `tf.stack`.

  >>> x = tf.reshape(tf.range(12), (3, 4))
  >>> p, q, r = tf.unstack(x)
  >>> p.shape.as_list()
  [4]
  >>> i, j, k, l = tf.unstack(x, axis=1)
  >>> i.shape.as_list()
  [3]

  Note: with eager execution you can unpack the 0th axis with plain Python
  iterable unpacking (`a, b, c = t`), but symbolic tensors inside a
  `tf.function` are not iterable, so `tf.unstack` is required there.

  The length of `axis` must be statically known (or supplied via `num` as a
  constant): the op produces a fixed number of outputs.  For a genuinely
  variable number of tensors within one `tf.function` trace, use explicit
  loops with a `tf.TensorArray` instead.

  Args:
    value: A rank `R > 0` `Tensor` to be unstacked.
    num: An `int`. The length of the dimension `axis`. Automatically inferred
      if `None` (the default).
    axis: An `int`. The axis to unstack along. Defaults to the first
      dimension. Negative values wrap around, so the valid range is
      `[-R, R)`.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unstacked from `value`.

  Raises:
    ValueError: If `axis` is out of the range `[-R, R)`.
    ValueError: If `num` is unspecified and cannot be inferred.
    InvalidArgumentError: If `num` does not match the shape of `value`.
  """
  if num is None:
    # Infer the output count from the static shape along `axis`.
    value = ops.convert_to_tensor(value)
    static_shape = value.get_shape()
    rank = static_shape.ndims
    if rank is not None:
      if not -rank <= axis < rank:
        raise ValueError(f"Argument `axis` = {axis} not in range "
                         f"[{-rank}, {rank})")
      num = static_shape.dims[axis].value
    if num is None:
      raise ValueError(f"Cannot infer argument `num` from shape {static_shape}")
  return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/clustering_ops.py ADDED
@@ -0,0 +1,774 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Clustering Operations."""
16
+
17
+ from tensorflow.python.framework import constant_op
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import ops
20
+ from tensorflow.python.framework import random_seed as random_seed_ops
21
+ from tensorflow.python.ops import array_ops
22
+ from tensorflow.python.ops import check_ops
23
+ from tensorflow.python.ops import cond
24
+ from tensorflow.python.ops import control_flow_ops
25
+ from tensorflow.python.ops import gen_clustering_ops
26
+ from tensorflow.python.ops import math_ops
27
+ from tensorflow.python.ops import nn_impl
28
+ from tensorflow.python.ops import random_ops
29
+ from tensorflow.python.ops import state_ops
30
+ from tensorflow.python.ops import variable_v1
31
+ from tensorflow.python.ops import while_loop
32
+ from tensorflow.python.ops.embedding_ops import embedding_lookup
33
+ # go/tf-wildcard-import
34
+ # pylint: disable=wildcard-import
35
+ from tensorflow.python.ops.gen_clustering_ops import *
36
+ # pylint: enable=wildcard-import
37
+
38
+ # Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\)
39
+ # which is the square root of the sum of the absolute squares of the elements
40
+ # difference.
41
+ SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
42
+ # Cosine distance between vectors U and V is defined as
43
+ # \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
44
+ COSINE_DISTANCE = 'cosine'
45
+
46
+ RANDOM_INIT = 'random'
47
+ KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
48
+ KMC2_INIT = 'kmc2'
49
+
50
+ CLUSTERS_VAR_NAME = 'clusters'
51
+
52
+
53
+ class KMeans:
54
+ """Creates the graph for k-means clustering."""
55
+
56
+ def __init__(self,
57
+ inputs,
58
+ num_clusters,
59
+ initial_clusters=RANDOM_INIT,
60
+ distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
61
+ use_mini_batch=False,
62
+ mini_batch_steps_per_iteration=1,
63
+ random_seed=0,
64
+ kmeans_plus_plus_num_retries=2,
65
+ kmc2_chain_length=200):
66
+ """Creates an object for generating KMeans clustering graph.
67
+
68
+ This class implements the following variants of K-means algorithm:
69
+
70
+ If use_mini_batch is False, it runs standard full batch K-means. Each step
71
+ runs a single iteration of K-Means. This step can be run sharded across
72
+ multiple workers by passing a list of sharded inputs to this class. Note
73
+ however that a single step needs to process the full input at once.
74
+
75
+ If use_mini_batch is True, it runs a generalization of the mini-batch
76
+ K-means algorithm. It runs multiple iterations, where each iteration is
77
+ composed of mini_batch_steps_per_iteration steps. Two copies of cluster
78
+ centers are maintained: one that is updated at the end of each iteration,
79
+ and one that is updated every step. The first copy is used to compute
80
+ cluster allocations for each step, and for inference, while the second copy
81
+ is the one updated each step using the mini-batch update rule. After each
82
+ iteration is complete, this second copy is copied back the first copy.
83
+
84
+ Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
85
+ the algorithm reduces to the standard mini-batch algorithm. Also by setting
86
+ mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
87
+ becomes an asynchronous version of the full-batch algorithm. Note however
88
+ that there is no guarantee by this implementation that each input is seen
89
+ exactly once per iteration. Also, different updates are applied
90
+ asynchronously without locking. So this asynchronous version may not behave
91
+ exactly like a full-batch version.
92
+
93
+ Args:
94
+ inputs: An input tensor or list of input tensors. It is assumed that the
95
+ data points have been previously randomly permuted.
96
+ num_clusters: An integer tensor specifying the number of clusters. This
97
+ argument is ignored if initial_clusters is a tensor or numpy array.
98
+ initial_clusters: Specifies the clusters used during initialization. One
99
+ of the following: - a tensor or numpy array with the initial cluster
100
+ centers. - a function f(inputs, k) that returns up to k centers from
101
+ `inputs`.
102
+ - "random": Choose centers randomly from `inputs`.
103
+ - "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
104
+ - "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
105
+ In the last three cases, one batch of `inputs` may not yield
106
+ `num_clusters` centers, in which case initialization will require
107
+ multiple batches until enough centers are chosen. In the case of
108
+ "random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
109
+ then the entire batch is chosen to be cluster centers.
110
+ distance_metric: Distance metric used for clustering. Supported options:
111
+ "squared_euclidean", "cosine".
112
+ use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
113
+ full batch.
114
+ mini_batch_steps_per_iteration: Number of steps after which the updated
115
+ cluster centers are synced back to a master copy.
116
+ random_seed: Seed for PRNG used to initialize seeds.
117
+ kmeans_plus_plus_num_retries: For each point that is sampled during
118
+ kmeans++ initialization, this parameter specifies the number of
119
+ additional points to draw from the current distribution before selecting
120
+ the best. If a negative value is specified, a heuristic is used to
121
+ sample O(log(num_to_sample)) additional points.
122
+ kmc2_chain_length: Determines how many candidate points are used by the
123
+ k-MC2 algorithm to produce one new cluster centers. If a (mini-)batch
124
+ contains less points, one new cluster center is generated from the
125
+ (mini-)batch.
126
+
127
+ Raises:
128
+ ValueError: An invalid argument was passed to initial_clusters or
129
+ distance_metric.
130
+ """
131
+ initialization_algorithms = [RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT]
132
+ if isinstance(initial_clusters,
133
+ str) and initial_clusters not in initialization_algorithms:
134
+ raise ValueError(
135
+ f'Unsupported initialization algorithm `{initial_clusters}`,'
136
+ f'must be one of `{initialization_algorithms}`.')
137
+
138
+ distance_metrics = [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
139
+ if distance_metric not in distance_metrics:
140
+ raise ValueError(f'Unsupported distance metric `{distance_metric}`,'
141
+ f'must be one of `{distance_metrics}`.')
142
+ self._inputs = inputs if isinstance(inputs, list) else [inputs]
143
+ self._num_clusters = num_clusters
144
+ self._initial_clusters = initial_clusters
145
+ self._distance_metric = distance_metric
146
+ self._use_mini_batch = use_mini_batch
147
+ self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
148
+ self._seed = random_seed_ops.get_seed(random_seed)[0]
149
+ self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
150
+ self._kmc2_chain_length = kmc2_chain_length
151
+
152
+ @classmethod
153
+ def _distance_graph(cls, inputs, clusters, distance_metric):
154
+ """Computes distance between each input and each cluster center.
155
+
156
+ Args:
157
+ inputs: list of input Tensors.
158
+ clusters: cluster Tensor.
159
+ distance_metric: distance metric used for clustering
160
+
161
+ Returns:
162
+ list of Tensors, where each element corresponds to each element in inputs.
163
+ The value is the distance of each row to all the cluster centers.
164
+ Currently only Euclidean distance and cosine distance are supported.
165
+ """
166
+ assert isinstance(inputs, list)
167
+ if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
168
+ return cls._compute_euclidean_distance(inputs, clusters)
169
+ elif distance_metric == COSINE_DISTANCE:
170
+ return cls._compute_cosine_distance(
171
+ inputs, clusters, inputs_normalized=True)
172
+ else:
173
+ assert False, str(distance_metric)
174
+
175
+ @classmethod
176
+ def _compute_euclidean_distance(cls, inputs, clusters):
177
+ """Computes Euclidean distance between each input and each cluster center.
178
+
179
+ Args:
180
+ inputs: list of input Tensors.
181
+ clusters: cluster Tensor.
182
+
183
+ Returns:
184
+ list of Tensors, where each element corresponds to each element in inputs.
185
+ The value is the distance of each row to all the cluster centers.
186
+ """
187
+ output = []
188
+ for inp in inputs:
189
+ with ops.colocate_with(inp, ignore_existing=True):
190
+ # Computes Euclidean distance. Note the first and third terms are
191
+ # broadcast additions.
192
+ squared_distance = (
193
+ math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
194
+ 2 * math_ops.matmul(inp, clusters, transpose_b=True) +
195
+ array_ops.transpose(
196
+ math_ops.reduce_sum(
197
+ math_ops.square(clusters), 1, keepdims=True)))
198
+ output.append(squared_distance)
199
+
200
+ return output
201
+
202
+ @classmethod
203
+ def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
204
+ """Computes cosine distance between each input and each cluster center.
205
+
206
+ Args:
207
+ inputs: list of input Tensor.
208
+ clusters: cluster Tensor
209
+ inputs_normalized: if True, it assumes that inp and clusters are
210
+ normalized and computes the dot product which is equivalent to the
211
+ cosine distance. Else it L2 normalizes the inputs first.
212
+
213
+ Returns:
214
+ list of Tensors, where each element corresponds to each element in inp.
215
+ The value is the distance of each row to all the cluster centers.
216
+ """
217
+ output = []
218
+ if not inputs_normalized:
219
+ with ops.colocate_with(clusters, ignore_existing=True):
220
+ clusters = nn_impl.l2_normalize(clusters, axis=1)
221
+ for inp in inputs:
222
+ with ops.colocate_with(inp, ignore_existing=True):
223
+ if not inputs_normalized:
224
+ inp = nn_impl.l2_normalize(inp, axis=1)
225
+ output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
226
+ return output
227
+
228
  def _infer_graph(self, inputs, clusters):
    """Maps input to closest cluster and the score.

    Args:
      inputs: list of input Tensors.
      clusters: Tensor of cluster centers.

    Returns:
      List of tuple, where each value in tuple corresponds to a value in inp.
      The tuple has following three elements:
      all_scores: distance of each input to each cluster center.
      score: distance of each input to closest cluster center.
      cluster_idx: index of cluster center closest to the corresponding input.
    """
    assert isinstance(inputs, list)
    # Pairwise distances are used only by transform(). In all other cases, this
    # sub-graph is not evaluated.
    scores = self._distance_graph(inputs, clusters, self._distance_metric)
    output = []
    if (self._distance_metric == COSINE_DISTANCE and
        not self._clusters_l2_normalized()):
      # The cosine distance between normalized vectors x and y is the same as
      # 2 * squared_euclidean_distance. We are using this fact and reusing the
      # nearest_neighbors op.
      # TODO(ands): Support COSINE distance in nearest_neighbors and remove
      # this.
      with ops.colocate_with(clusters, ignore_existing=True):
        clusters = nn_impl.l2_normalize(clusters, axis=1)
    for inp, score in zip(inputs, scores):
      with ops.colocate_with(inp, ignore_existing=True):
        # nearest_neighbors with k=1 yields, per row, the index of and
        # (squared Euclidean) distance to the single closest center.
        (indices,
         distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
        if self._distance_metric == COSINE_DISTANCE:
          # Convert squared Euclidean distance on normalized vectors back to
          # cosine distance (see the identity noted above).
          distances *= 0.5
        # Squeeze away the k=1 dimension so score/index are per-row vectors.
        output.append(
            (score, array_ops.squeeze(distances,
                                      [-1]), array_ops.squeeze(indices, [-1])))
    return zip(*output)
266
+
267
+ def _clusters_l2_normalized(self):
268
+ """Returns True if clusters centers are kept normalized."""
269
+ return (self._distance_metric == COSINE_DISTANCE and
270
+ (not self._use_mini_batch or
271
+ self._mini_batch_steps_per_iteration > 1))
272
+
273
  def _create_variables(self, num_clusters):
    """Creates variables.

    Args:
      num_clusters: an integer Tensor providing the number of clusters.

    Returns:
      Tuple with following elements:
      - cluster_centers: a Tensor for storing cluster centers
      - cluster_centers_initialized: bool Variable indicating whether clusters
        are initialized.
      - cluster_counts: a Tensor for storing counts of points assigned to this
        cluster. This is used by mini-batch training.
      - cluster_centers_updated: Tensor representing copy of cluster centers
        that are updated every step.
      - update_in_steps: numbers of steps left before we sync
        cluster_centers_updated back to cluster_centers.
    """
    # The centers' shape is not known until initialization picks them, so
    # start from an empty placeholder default and skip shape validation.
    init_value = array_ops.placeholder_with_default([], shape=None)
    cluster_centers = variable_v1.VariableV1(
        init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
    cluster_centers_initialized = variable_v1.VariableV1(
        False, dtype=dtypes.bool, name='initialized')

    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
      # Copy of cluster centers actively updated each step according to
      # mini-batch update rule.
      cluster_centers_updated = variable_v1.VariableV1(
          init_value, name='clusters_updated', validate_shape=False)
      # How many steps till we copy the updated clusters to cluster_centers.
      update_in_steps = variable_v1.VariableV1(
          self._mini_batch_steps_per_iteration,
          dtype=dtypes.int64,
          name='update_in_steps')
      # Count of points assigned to cluster_centers_updated.
      cluster_counts = variable_v1.VariableV1(
          array_ops.zeros([num_clusters], dtype=dtypes.int64))
    else:
      # No separate master copy: updates go straight to cluster_centers, and
      # counts are only needed when mini-batch updates are in play.
      cluster_centers_updated = cluster_centers
      update_in_steps = None
      cluster_counts = (
          variable_v1.VariableV1(
              array_ops.ones([num_clusters], dtype=dtypes.int64))
          if self._use_mini_batch else None)
    return (cluster_centers, cluster_centers_initialized, cluster_counts,
            cluster_centers_updated, update_in_steps)
319
+
320
+ @classmethod
321
+ def _l2_normalize_data(cls, inputs):
322
+ """Normalized the input data."""
323
+ output = []
324
+ for inp in inputs:
325
+ with ops.colocate_with(inp, ignore_existing=True):
326
+ output.append(nn_impl.l2_normalize(inp, dim=1))
327
+ return output
328
+
329
  def training_graph(self):
    """Generate a training graph for kmeans algorithm.

    This returns, among other things, an op that chooses initial centers
    (init_op), a boolean variable that is set to True when the initial centers
    are chosen (cluster_centers_initialized), and an op to perform either an
    entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
    The caller should use these components as follows. A single worker should
    execute init_op multiple times until cluster_centers_initialized becomes
    True. Then multiple workers may execute training_op any number of times.

    Returns:
      A tuple consisting of:
      all_scores: A matrix (or list of matrices) of dimensions (num_input,
        num_clusters) where the value is the distance of an input vector and a
        cluster center.
      cluster_idx: A vector (or list of vectors). Each element in the vector
        corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      scores: Similar to cluster_idx but specifies the distance to the
        assigned cluster instead.
      cluster_centers_initialized: scalar indicating whether clusters have been
        initialized.
      init_op: an op to initialize the clusters.
      training_op: an op that runs an iteration of training.
    """
    # Implementation of kmeans.
    if (isinstance(self._initial_clusters, str) or
        callable(self._initial_clusters)):
      initial_clusters = self._initial_clusters
      num_clusters = ops.convert_to_tensor(self._num_clusters)
    else:
      # Concrete initial centers were supplied: their count takes precedence
      # over self._num_clusters.
      initial_clusters = ops.convert_to_tensor(self._initial_clusters)
      num_clusters = array_ops.shape(initial_clusters)[0]

    inputs = self._inputs
    (cluster_centers_var, cluster_centers_initialized, total_counts,
     cluster_centers_updated,
     update_in_steps) = self._create_variables(num_clusters)
    init_op = _InitializeClustersOpFactory(
        self._inputs, num_clusters, initial_clusters, self._distance_metric,
        self._seed, self._kmeans_plus_plus_num_retries, self._kmc2_chain_length,
        cluster_centers_var, cluster_centers_updated,
        cluster_centers_initialized).op()
    cluster_centers = cluster_centers_var

    if self._distance_metric == COSINE_DISTANCE:
      # Cosine distance assumes unit-norm inputs; normalize centers too
      # unless they are already kept normalized (see
      # _clusters_l2_normalized).
      inputs = self._l2_normalize_data(inputs)
      if not self._clusters_l2_normalized():
        # NOTE(review): `dim` is the deprecated alias of `axis` in
        # nn_impl.l2_normalize; behavior is unchanged.
        cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)

    all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
    if self._use_mini_batch:
      # Every training step first (possibly) syncs the per-step center copy
      # back to the master copy, then applies the mini-batch update.
      sync_updates_op = self._mini_batch_sync_updates_op(
          update_in_steps, cluster_centers_var, cluster_centers_updated,
          total_counts)
      assert sync_updates_op is not None
      with ops.control_dependencies([sync_updates_op]):
        training_op = self._mini_batch_training_op(inputs, cluster_idx,
                                                   cluster_centers_updated,
                                                   total_counts)
    else:
      assert cluster_centers == cluster_centers_var
      training_op = self._full_batch_training_op(inputs, num_clusters,
                                                 cluster_idx,
                                                 cluster_centers_var)

    return (all_scores, cluster_idx, scores, cluster_centers_initialized,
            init_op, training_op)
398
+
399
  def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
                                  cluster_centers_updated, total_counts):
    """Creates an op that periodically syncs the per-step center copy back.

    Decrements `update_in_steps` each call; once it reaches zero, copies
    `cluster_centers_updated` into `cluster_centers_var` (normalizing first
    for cosine distance), zeroes `total_counts`, and resets the countdown.
    When no separate master copy is kept (not mini-batch, or
    steps-per-iteration == 1), this is a no-op.

    Args:
      update_in_steps: int64 Variable counting down steps until the next sync.
      cluster_centers_var: master copy of cluster centers.
      cluster_centers_updated: per-step copy of cluster centers.
      total_counts: Variable of per-cluster assignment counts to reset.

    Returns:
      The op described above, or `no_op` when syncing does not apply.
    """
    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
      assert update_in_steps is not None
      with ops.colocate_with(update_in_steps, ignore_existing=True):

        def _f():
          # Note that there is a race condition here, so we do a best effort
          # updates here. We reset update_in_steps first so that other workers
          # don't duplicate the updates. Also we update cluster_center_vars
          # before resetting total_counts to avoid large updates to
          # cluster_centers_updated based on partially updated
          # cluster_center_vars.
          with ops.control_dependencies([
              state_ops.assign(update_in_steps,
                               self._mini_batch_steps_per_iteration - 1)
          ]):
            with ops.colocate_with(
                cluster_centers_updated, ignore_existing=True):
              # For cosine distance the master copy is kept L2-normalized;
              # `dim` is the deprecated alias of `axis` here.
              if self._distance_metric == COSINE_DISTANCE:
                cluster_centers = nn_impl.l2_normalize(
                    cluster_centers_updated, dim=1)
              else:
                cluster_centers = cluster_centers_updated
            with ops.colocate_with(cluster_centers_var, ignore_existing=True):
              with ops.control_dependencies(
                  [state_ops.assign(cluster_centers_var, cluster_centers)]):
                with ops.colocate_with(None, ignore_existing=True):
                  with ops.control_dependencies([
                      state_ops.assign(total_counts,
                                       array_ops.zeros_like(total_counts))
                  ]):
                    return array_ops.identity(update_in_steps)

        # Sync when the countdown has expired; otherwise just decrement it.
        return cond.cond(
            update_in_steps <= 0, _f,
            lambda: state_ops.assign_sub(update_in_steps, 1))
    else:
      return control_flow_ops.no_op()
438
+
439
  def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
                              total_counts):
    """Creates an op for training for mini batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.
      total_counts: Tensor Ref of cluster counts.

    Returns:
      An op for doing an update of mini-batch k-means.
    """
    update_ops = []
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp, ignore_existing=True):
        assert total_counts is not None
        # Flatten so that each entry is the cluster id of one input row.
        cluster_idx = array_ops.reshape(cluster_idx, [-1])
        # Dedupe the unique ids of cluster_centers being updated so that updates
        # can be locally aggregated.
        unique_ids, unique_idx = array_ops.unique(cluster_idx)
        num_unique_cluster_idx = array_ops.size(unique_ids)
        # Fetch the old values of counts and cluster_centers.
        with ops.colocate_with(total_counts, ignore_existing=True):
          old_counts = array_ops.gather(total_counts, unique_ids)
        # TODO(agarwal): This colocation seems to run into problems. Fix it.
        with ops.colocate_with(cluster_centers, ignore_existing=True):
          old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
        # Locally aggregate the increment to counts.
        count_updates = math_ops.unsorted_segment_sum(
            array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
            unique_idx, num_unique_cluster_idx)
        # Locally compute the sum of inputs mapped to each id.
        # For a cluster with old cluster value x, old count n, and with data
        # d_1,...d_k newly assigned to it, we recompute the new value as
        # \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
        # Compute \\(sum_i(d_i)\\), see comment above.
        cluster_center_updates = math_ops.unsorted_segment_sum(
            inp, unique_idx, num_unique_cluster_idx)
        # Shape to enable broadcasting count_updates and learning_rate to inp.
        # It extends the shape with 1's to match the rank of inp.
        broadcast_shape = array_ops.concat([
            array_ops.reshape(num_unique_cluster_idx, [1]),
            array_ops.ones(
                array_ops.reshape(array_ops.rank(inp) - 1, [1]),
                dtype=dtypes.int32)
        ], 0)
        # Subtract k * x, see comment above.
        cluster_center_updates -= math_ops.cast(
            array_ops.reshape(count_updates, broadcast_shape),
            inp.dtype) * old_cluster_centers
        learning_rate = math_ops.reciprocal(
            math_ops.cast(old_counts + count_updates, inp.dtype))
        learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
        # scale by 1 / (n + k), see comment above.
        cluster_center_updates *= learning_rate
        # Apply the updates.
        update_counts = state_ops.scatter_add(total_counts, unique_ids,
                                              count_updates)
        update_cluster_centers = state_ops.scatter_add(cluster_centers,
                                                       unique_ids,
                                                       cluster_center_updates)
        update_ops.extend([update_counts, update_cluster_centers])
    # A single op that runs every per-shard count/center scatter update.
    return control_flow_ops.group(*update_ops)
505
+
506
  def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
                              cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      num_clusters: an integer Tensor providing the number of clusters.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of full-batch k-means: assigns the per-cluster
      mean of the assigned points to `cluster_centers`.
    """
    cluster_sums = []
    cluster_counts = []
    # epsilon keeps the division below finite for clusters that received no
    # points (count of zero).
    epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp, ignore_existing=True):
        # Per-shard sum of points and number of points per cluster.
        cluster_sums.append(
            math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
        cluster_counts.append(
            math_ops.unsorted_segment_sum(
                array_ops.reshape(
                    array_ops.ones(
                        array_ops.reshape(array_ops.shape(inp)[0], [-1])),
                    [-1, 1]), cluster_idx, num_clusters))
    with ops.colocate_with(cluster_centers, ignore_existing=True):
      # New center = (sum of assigned points) / (count + epsilon).
      new_clusters_centers = math_ops.add_n(cluster_sums) / (
          math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
          epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
      return state_ops.assign(cluster_centers, new_clusters_centers)
541
+
542
+
543
class _InitializeClustersOpFactory:
  """Internal class to create the op to initialize the clusters.

  The op performs this algorithm (see constructor args):

  num_remaining = num_clusters - length(cluster_centers)
  if num_remaining == 0:
    assert that cluster_centers_initialized is true
  else:
    assert that num_remaining > 0
    new_centers = choose up to num_remaining initial centers
    l2-normalize new_centers if using cosine distance
    all_centers = concat(cluster_centers, new_centers)
    cluster_centers := all_centers
    if there is a cluster_centers_updated variable:
      cluster_centers_updated := cluster_centers
    num_now_remaining = num_clusters - length(cluster_centers)
    if num_now_remaining == 0:
      cluster_centers_initialized := true
  """

  # TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.

  def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
               random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
               cluster_centers, cluster_centers_updated,
               cluster_centers_initialized):
    """Creates an op factory.

    Args:
      inputs: See KMeans constructor.
      num_clusters: An integer Tensor providing the number of clusters.
      initial_clusters: See KMeans constructor.
      distance_metric: See KMeans constructor.
      random_seed: See KMeans constructor.
      kmeans_plus_plus_num_retries: See KMeans constructor.
      kmc2_chain_length: See KMeans constructor.
      cluster_centers: The TF variable holding the initial centers. It may
        already contain some centers when the op is executed.
      cluster_centers_updated: A second TF variable to hold a copy of the
        initial centers, used for full-batch mode. In mini-batch mode,
        cluster_centers_updated is the same variable as cluster_centers.
      cluster_centers_initialized: A boolean TF variable that will be set to
        true when all the initial centers have been chosen.
    """
    # All of these instance variables are constants.
    self._inputs = inputs
    self._num_clusters = num_clusters
    self._initial_clusters = initial_clusters
    self._distance_metric = distance_metric
    self._seed = random_seed
    self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
    self._kmc2_chain_length = kmc2_chain_length
    self._cluster_centers = cluster_centers
    self._cluster_centers_updated = cluster_centers_updated
    self._cluster_centers_initialized = cluster_centers_initialized

    # Derived tensors: number of centers already chosen, number still needed,
    # and the total number of data points across all input shards.
    self._num_selected = array_ops.shape(self._cluster_centers)[0]
    self._num_remaining = self._num_clusters - self._num_selected
    self._num_data = math_ops.add_n(
        [array_ops.shape(i)[0] for i in self._inputs])

  def _random(self):
    """Picks `num_remaining` data points uniformly at random as centers."""
    indices = random_ops.random_uniform(
        array_ops.reshape(self._num_remaining, [-1]),
        minval=0,
        maxval=math_ops.cast(self._num_data, dtypes.int64),
        seed=self._seed,
        dtype=dtypes.int64)
    return embedding_lookup(self._inputs, indices, partition_strategy='div')

  def _kmeans_plus_plus(self):
    """Chooses `num_remaining` centers via the k-means++ initialization op."""
    # Points from only the first shard are used for initializing centers.
    # TODO(ands): Use all points.
    inp = self._inputs[0]
    if self._distance_metric == COSINE_DISTANCE:
      inp = nn_impl.l2_normalize(inp, dim=1)
    return gen_clustering_ops.kmeans_plus_plus_initialization(
        inp, math_ops.cast(self._num_remaining, dtypes.int64), self._seed,
        self._kmeans_plus_plus_num_retries)

  def _kmc2_multiple_centers(self):
    """Adds new initial cluster centers using the k-MC2 algorithm.

    In each call to the op, the provided batch is split into subsets based on
    the specified `kmc2_chain_length`. On each subset, a single Markov chain of
    the k-MC2 algorithm is used to add *one* new center cluster center. If there
    are less than `kmc2_chain_length` points in the subset, a single center is
    added using one Markov chain on the full input. It is assumed that the
    provided batch has previously been randomly permuted. Otherwise, k-MC2 may
    return suboptimal centers.

    Returns:
      An op that adds new cluster centers.
    """
    # The op only operates on the first shard of data.
    first_shard = self._inputs[0]
    # Number of points in the input that can be used.
    batch_size = array_ops.shape(first_shard)[0]
    # Maximum number of subsets such that the size of each subset is at least
    # `kmc2_chain_length`. Final subsets may be larger.
    max_to_sample = math_ops.cast(
        batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
    # We sample at least one new center and at most all remaining centers.
    num_to_sample = math_ops.maximum(
        math_ops.minimum(self._num_remaining, max_to_sample), 1)

    def _cond(i, _):
      """Stopping condition for the while loop."""
      return math_ops.less(i, num_to_sample)

    def _body(i, _):
      """Body that adds a single new center based on a subset."""

      def _sample_random():
        """Returns a random point as a cluster center."""
        # By assumption the batch is reshuffled and _sample_random is always
        # called for i=0. Hence, we simply return the first point.
        new_center = array_ops.reshape(first_shard[0], [1, -1])
        if self._distance_metric == COSINE_DISTANCE:
          new_center = nn_impl.l2_normalize(new_center, dim=1)
        return new_center

      def _sample_kmc2_chain():
        """Returns previous centers as well as a new center sampled using k-MC2."""
        # Extract the subset from the underlying batch.
        start = i * self._kmc2_chain_length
        end = start + self._kmc2_chain_length
        subset = first_shard[start:end]
        # Compute the distances from points in the subset to previous centers.
        _, distances = gen_clustering_ops.nearest_neighbors(
            subset, self._cluster_centers, 1)
        # Sample index of new center using k-MC2 Markov chain.
        new_center_index = gen_clustering_ops.kmc2_chain_initialization(
            array_ops.squeeze(distances), self._seed)
        # Extract actual new center.
        newly_sampled_center = array_ops.reshape(subset[new_center_index],
                                                 [1, -1])
        # Return concatenation with previously sampled centers.
        if self._distance_metric == COSINE_DISTANCE:
          newly_sampled_center = nn_impl.l2_normalize(
              newly_sampled_center, dim=1)
        return array_ops.concat([self._cluster_centers, newly_sampled_center],
                                0)

      # Obtain a random point if there are no previously sampled centers.
      # Otherwise, construct a k-MC2 Markov chain.
      new_centers = cond.cond(
          math_ops.equal(self._num_selected, 0), _sample_random,
          _sample_kmc2_chain)
      # Assign new cluster centers to underlying variable.
      assigned_centers = state_ops.assign(
          self._cluster_centers, new_centers, validate_shape=False)
      if self._cluster_centers_updated is not self._cluster_centers:
        # Keep the shadow copy (full-batch mode) in sync with the main variable.
        assigned_centers = state_ops.assign(
            self._cluster_centers_updated,
            assigned_centers,
            validate_shape=False)
      return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]

    # Add num_to_sample new data points.
    _, num_remaining = while_loop.while_loop(_cond, _body, [0, 0])
    return num_remaining

  def _greedy_batch_sampler(self, sampler):
    # If the input dataset size is smaller than the number of centers
    # remaining, choose the entire input dataset as centers. This can happen
    # with mini-batch. Otherwise, sample the batch according to the provided
    # sampler.
    return cond.cond(self._num_data <= self._num_remaining,
                     lambda: array_ops.concat(self._inputs, 0),
                     sampler)

  def _single_batch_sampler(self, sampler):
    # Enforce that there are at least as many data points as centers
    # remaining. This gives the provided sampler the chance to select all
    # remaining centers from a single batch.
    with ops.control_dependencies(
        [check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
      return sampler()

  def _choose_initial_centers(self):
    """Dispatches on `initial_clusters`: named strategy, callable, or tensor."""
    if isinstance(self._initial_clusters, str):
      if self._initial_clusters == RANDOM_INIT:
        return self._greedy_batch_sampler(self._random)
      else:  # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
        return self._single_batch_sampler(self._kmeans_plus_plus)
    elif callable(self._initial_clusters):
      return self._initial_clusters(self._inputs, self._num_remaining)
    else:
      # A tensor of centers was supplied directly; it must provide exactly the
      # number of centers still remaining.
      with ops.control_dependencies([
          check_ops.assert_equal(self._num_remaining,
                                 array_ops.shape(self._initial_clusters)[0])
      ]):
        return self._initial_clusters

  def _add_new_centers(self):
    """Adds some centers and returns the number of centers remaining."""
    new_centers = self._choose_initial_centers()
    if self._distance_metric == COSINE_DISTANCE:
      new_centers = nn_impl.l2_normalize(new_centers, dim=1)
    # If cluster_centers is empty, it doesn't have the right shape for concat.
    all_centers = cond.cond(
        math_ops.equal(self._num_selected, 0), lambda: new_centers,
        lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
    # TODO(ccolby): De-dupe all_centers?
    a = state_ops.assign(
        self._cluster_centers, all_centers, validate_shape=False)
    if self._cluster_centers_updated is not self._cluster_centers:
      a = state_ops.assign(
          self._cluster_centers_updated, a, validate_shape=False)
    return self._num_clusters - array_ops.shape(a)[0]

  def _initialize(self):
    """Runs one round of initialization; flips the initialized flag when done."""
    with ops.control_dependencies([
        check_ops.assert_positive(self._num_remaining),
    ]):
      if self._initial_clusters == KMC2_INIT:
        num_now_remaining = self._kmc2_multiple_centers()
      else:
        num_now_remaining = self._add_new_centers()
      return cond.cond(
          math_ops.equal(num_now_remaining, 0),
          lambda: state_ops.assign(self._cluster_centers_initialized, True),
          control_flow_ops.no_op)

  def op(self):
    """Returns the cluster initializer op."""
    return cond.cond(
        math_ops.equal(self._num_remaining, 0),
        lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
        self._initialize)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/collective_ops.py ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """TensorFlow collective Ops."""
16
+ from tensorflow.python.ops import gen_collective_ops
17
+
18
+
19
def all_reduce(t,
               group_size,
               group_key,
               instance_key,
               merge_op='Add',
               final_op='Id',
               subdiv_offsets=(0,),
               communication_hint='auto',
               timeout=0):
  """Performs a collective reduction of `t` across a group of devices.

  Args:
    t: the tensor to be reduced.
    group_size: the total number of tensors to be collectively reduced.
      Each must reside on a different device. Should be a positive integer.
    group_key: an integer identifying the group of devices.
    instance_key: an integer identifying the participating group of Ops.
    merge_op: string naming the binary Op to be applied to compute each
      partial reduction.
    final_op: string naming the unary Op to be applied to each fully
      reduced value. Can be 'Id' for no operation.
    subdiv_offsets: a list of integer offsets into the tensor at which each
      independent subdivision should begin. Use [0] if no subdivision should
      be done.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: a float. If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the distributed reduction.

  Raises:
    ValueError: if `group_size` is not a positive integer.
  """
  # Guard clause: a collective group must contain at least one participant.
  if not group_size >= 1:
    raise ValueError(
        f'Parameter `group_size` to all_reduce must be at least 1. '
        f'Received: {group_size}.')
  hint = communication_hint.lower()
  return gen_collective_ops.collective_reduce(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      merge_op=merge_op,
      final_op=final_op,
      subdiv_offsets=subdiv_offsets,
      communication_hint=hint,
      timeout_seconds=timeout)
69
+
70
+
71
def assign_group_v2(group_assignment, device_index, base_key):
  """Derives the collective group size and group key for one device.

  Args:
    group_assignment: a 2 dimensional integer Tensor that encodes which devices
      belong to the same group. The values are indices of the devices within 0
      to number of devices.
    device_index: integer for the index of the current device
    base_key: integer to offset the resulted group_key. The base key shall be
      unique for different values of group_assignment in the same tf.function.
  Notes: The device_index argument must be consistent with the index of the
    device of this Op in the device assignment list. The behavior of this Op is
    undefined if they are inconsistent.

  Returns:
    group_size, group_key: The group size and group key for the current device.
  """
  size, key = gen_collective_ops.collective_assign_group_v2(
      group_assignment=group_assignment,
      device_index=device_index,
      base_key=base_key)
  return size, key
93
+
94
+
95
def all_reduce_v2(t,
                  group_size,
                  group_key,
                  instance_key,
                  merge_op='Add',
                  final_op='Id',
                  communication_hint='auto',
                  timeout=0,
                  ordering_token=None,
                  max_subdivs_per_device=-1,
                  name=None):
  """Performs a v2 collective reduction of `t` across a group of devices.

  Args:
    t: the tensor to be reduced.
    group_size: an int32 tensor. The total number of tensors to be collectively
      reduced. Each must reside on a different device. Should be a positive
      integer.
    group_key: an int32 tensor identifying the group of devices.
    instance_key: an int32 tensor identifying the participating group of Ops.
    merge_op: string naming the binary Op to be applied to compute each partial
      reduction.
    final_op: string naming the unary Op to be applied to each fully reduced
      value. Can be 'Id' for no operation.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: a float. If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.
    ordering_token: a resource tensor on the same device as the op to order
      the collectives in a per-device manner by auto control dependency.
      This argument can be omitted when there is one collective Op per
      `tf.function`, or when explicit control dependency is used instead of
      auto control dependency.
    max_subdivs_per_device: int specifying the maximum number of subdivisions a
      tensor on a device can be divided into. The runtime uses this constraint
      to parallelize processing of each per-device tensor. Setting to -1
      disables subdivision and reverts to previous behavior of not sub-dividing
      tensor. Setting to 0 uses system defaults.
    name: name of the Op.

  Returns:
    An Op implementing the distributed reduction.
  """
  # The generated op takes a (possibly empty) list of ordering tokens.
  tokens = [] if ordering_token is None else [ordering_token]
  return gen_collective_ops.collective_reduce_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      merge_op=merge_op,
      final_op=final_op,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout,
      is_stateless=False,
      ordering_token=tokens,
      max_subdivs_per_device=max_subdivs_per_device,
      name=name)
158
+
159
+
160
def all_gather(t,
               group_size,
               group_key,
               instance_key,
               communication_hint='auto',
               timeout=0):
  """Concatenates tensors collectively across devices along the first axis.

  Args:
    t: the tensor to participate in the accumulation.
    group_size: the total number of tensors to be collectively accumulated.
      Each must reside on a different device. Should be a positive integer.
    group_key: an integer identifying the group of devices.
    instance_key: an integer identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: a float. If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the distributed operation.

  Raises:
    ValueError: if `group_size` is not a positive integer.
  """
  # Guard clause: a collective group must contain at least one participant.
  if not group_size >= 1:
    raise ValueError(
        f'Parameter `group_size` to all_gather must be at least 1.'
        f' Received: {group_size}.')
  hint = communication_hint.lower()
  return gen_collective_ops.collective_gather(
      t,
      shape=[0],
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=hint,
      timeout_seconds=timeout)
198
+
199
+
200
def all_gather_v2(t,
                  group_size,
                  group_key,
                  instance_key,
                  communication_hint='auto',
                  timeout=0,
                  ordering_token=None,
                  name=None):
  """Concatenates tensors collectively across devices along the first axis (v2).

  Args:
    t: the tensor to participate in the accumulation.
    group_size: an int32 tensor, the total number of tensors to be collectively
      accumulated. Each must reside on a different device. Should be a positive
      integer.
    group_key: an int32 tensor identifying the group of devices.
    instance_key: an int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: a float. If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.
    ordering_token: a resource tensor on the same device as the op to order
      the collectives in a per-device manner by auto control dependency.
      This argument can be omitted when there is one collective Op per
      `tf.function`, or when explicit control dependency is used instead of
      auto control dependency.
    name: name of the Op.

  Returns:
    An Op implementing the distributed operation.
  """
  # The generated op takes a (possibly empty) list of ordering tokens.
  tokens = [] if ordering_token is None else [ordering_token]
  return gen_collective_ops.collective_gather_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout,
      is_stateless=False,
      ordering_token=tokens,
      name=name)
248
+
249
+
250
def broadcast_send(t,
                   shape,
                   dtype,
                   group_size,
                   group_key,
                   instance_key,
                   communication_hint='auto',
                   timeout=0):
  """Sends one tensor to a group of receiving devices.

  Args:
    t: the tensor to be sent.
    shape: the shape of the tensor being sent, which must agree with t.
    dtype: the type of the tensor being sent, which must agree with t.
    group_size: one plus the number of receiving tensors, i.e. the total
      number of devices participating. Each tensor must reside on a
      different device.
    group_key: an integer identifying the group of devices.
    instance_key: an integer identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: If set to a non zero, set a completion timeout to detect staleness.
      If the timer goes off, a DeadlineExceededError is raised.
      The timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the distributed broadcast send.

  Raises:
    ValueError: if any of the input parameter constraints are not met.

  Note that the shape and dtype arguments appear redundant since they
  should be obtainable from t. The are two reasons for including
  them. First, the shape and type of tensors passed via broadcast must
  be known ahead of time in their most specific form so that the receive
  side can allocate memory for the operation and shape/type inference can
  carry forward from there. Including the same declarations on the
  send side clarifies a commitment already made. Secondly, having nearly
  identical use syntax for send and receive sides may simplify tool-driven
  generation of broadcast.
  """
  # Validate, in order: group size, declared shape, declared dtype.
  if group_size <= 1:
    raise ValueError(
        f'Parameter `group_size` to broadcast_send must be at least 2. '
        f'Received: {group_size}.')
  if t.shape != shape:
    raise ValueError(
        f'Shape of broadcast_send tensor `t` not equal to declared shape. '
        f'Received {t.shape}, expected {shape}.')
  if t.dtype != dtype:
    raise ValueError(
        f'Type of broadcast_send tensor `t` not equal to declared type. '
        f'Received {t.dtype}, expected {dtype}.')
  return gen_collective_ops.collective_bcast_send(
      t,
      shape=shape,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout)
312
+
313
+
314
def broadcast_send_v2(t,
                      group_size,
                      group_key,
                      instance_key,
                      communication_hint='auto',
                      timeout=0):
  """Sends one tensor to a group of receiving devices (v2).

  Args:
    t: the tensor to be sent.
    group_size: an int32 tensor. One plus the number of receiving tensors, i.e.
      the total number of devices participating. Each tensor must reside on a
      different device.
    group_key: an int32 tensor identifying the group of devices.
    instance_key: an int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: If set to a non zero, set a completion timeout to detect staleness.
      If the timer goes off, a DeadlineExceededError is raised.
      The timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the distributed broadcast send.
  """
  hint = communication_hint.lower()
  return gen_collective_ops.collective_bcast_send_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=hint,
      timeout_seconds=timeout)
346
+
347
+
348
def broadcast_recv(shape,
                   dtype,
                   group_size,
                   group_key,
                   instance_key,
                   communication_hint='auto',
                   timeout=0):
  """Receives a broadcast tensor, across devices.

  Args:
    shape: Shape of the tensor to be received.
    dtype: Type of the tensor to be received.
    group_size: one plus the number of receiving tensors, i.e. the total
      number of devices participating. Each tensor must reside on a
      different device.
    group_key: an integer identifying the group of devices.
    instance_key: an integer identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto`, `ring`, and
      `nccl`.
    timeout: If set to a non zero, set a completion timeout to detect staleness.
      If the timer goes off, a DeadlineExceededError is raised.
      The timeout value in seconds. This feature is experimental.

  Returns:
    An Op implementing the broadcast receive.

  Raises:
    ValueError: if any of the input parameter constraints are not met.
  """
  if group_size <= 1:
    # Fixed copy-paste bug: the message previously named `broadcast_send`.
    raise ValueError(
        'Parameter `group_size` to broadcast_recv must be at least 2. '
        f'Received: {group_size}.')
  return gen_collective_ops.collective_bcast_recv(
      shape=shape,
      T=dtype,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout)
390
+
391
+
392
def broadcast_recv_v2(shape,
                      dtype,
                      group_size,
                      group_key,
                      instance_key,
                      communication_hint='auto',
                      timeout=0):
  """Receives a tensor broadcast from another device in the group.

  Args:
    shape: an int tensor giving the shape of the tensor to be received.
    dtype: type of the tensor to be received.
    group_size: an int32 tensor; one plus the number of receiving tensors,
      i.e. the total number of devices participating. Each tensor must reside
      on a different device.
    group_key: an int32 tensor identifying the group of devices.
    instance_key: an int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective communication; one of `auto`,
      `ring`, or `nccl`. The implementation may fall back to another
      mechanism.
    timeout: completion timeout in seconds used to detect staleness; 0 means
      no timeout. When the timer fires, a DeadlineExceededError is raised.
      This feature is experimental.

  Returns:
    An Op implementing the broadcast receive.
  """
  # Normalize the hint to lowercase, matching the other wrappers in this file.
  hint = communication_hint.lower()
  return gen_collective_ops.collective_bcast_recv_v2(
      T=dtype,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      shape=shape,
      communication_hint=hint,
      timeout_seconds=timeout)
427
+
428
+
429
def initialize_communicator(group_key,
                            rank,
                            group_size,
                            communication_hint='auto',
                            timeout_seconds=0):
  """Initializes a collective communicator.

  Creates a collective communicator representing membership in the collective
  group identified by `group_key`. Call it once per member of the group, each
  member on a different device; the op blocks until every member of the group
  has run it.

  Communicators of a group can only be initialized once. Trying to initialize
  communicators for an existing group key will result in an error.

  Args:
    group_key: an int32 `tf.Tensor` identifying the group.
    rank: a `tf.Tensor` giving the rank of this device in the group. If
      specified, the rank is required to be unique in the group.
    group_size: an int32 `tf.Tensor`. The size of the group.
    communication_hint: preferred collective communication; one of `auto`,
      `ring`, or `nccl`. The implementation may fall back to another
      mechanism.
    timeout_seconds: completion timeout in seconds used to detect staleness;
      0 means no timeout. When the timer fires, a DeadlineExceededError is
      raised. This feature is experimental.

  Returns:
    A resource `tf.Tensor`.
  """
  # NOTE: unlike the other wrappers in this file, the hint is forwarded
  # verbatim (no .lower()), preserving the original behavior.
  return gen_collective_ops.collective_initialize_communicator(
      group_key=group_key,
      rank=rank,
      group_size=group_size,
      communication_hint=communication_hint,
      timeout_seconds=timeout_seconds)
466
+
467
+
468
def all_reduce_v3(communicator,
                  t,
                  reduction='Add',
                  group_assignment=None,
                  timeout_seconds=None):
  """Reduces tensors mutually.

  Args:
    communicator: the resource `tf.Tensor` returned from
      `initialize_communicator`.
    t: the `tf.Tensor` to be reduced.
    reduction: a string. The name of the operation to reduce the values.
      Accepted values are `"min"`, `"max"`, `"mul"`, `"add"`.
    group_assignment: Optional int32 `tf.Tensor` with shape [num_groups,
      num_ranks_per_group]. `group_assignment[i]` represents the ranks in the
      `ith` subgroup.
    timeout_seconds: If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.

  Returns:
    The reduced `tf.Tensor`.
  """
  # The op requires a (possibly empty) group assignment list, not None.
  if group_assignment is None:
    group_assignment = []
  return gen_collective_ops.collective_reduce_v3(
      communicator=communicator,
      input=t,
      group_assignment=group_assignment,
      reduction=reduction,
      timeout_seconds=timeout_seconds)
499
+
500
+
501
def all_to_all_v2(
    t,
    group_size,
    group_key,
    instance_key,
    communication_hint='auto',
    timeout=0,
    ordering_token=None,
    name=None,
):
  """Exchanges tensors mutually.

  Args:
    t: a `tf.Tensor`. The first dimension should have the length as the size of
      the group. `t[i]` is sent to `rank i` within the group.
    group_size: an int32 tensor, the total number of tensors to be mutually
      exchanged. Each must reside on a different device. Should be a positive
      integer.
    group_key: an int32 tensor identifying the group of devices.
    instance_key: an int32 tensor identifying the participating group of Ops.
    communication_hint: preferred collective communication. The implementation
      may fall back to another mechanism. Options include `auto` and `nccl`.
    timeout: a float. If set to a non zero, set a completion timeout to detect
      staleness. If the timer goes off, a DeadlineExceededError is raised. The
      timeout value in seconds. This feature is experimental.
    ordering_token: a resource tensor on the same device as the op to order the
      collectives in a per-device manner by auto control dependency. This
      argument can be omitted when there is one collective Op per `tf.function`,
      or when explicit control dependency is used instead of auto control
      dependency.
    name: name of the Op.

  Returns:
    An Op implementing the distributed operation.
  """
  # The op expects a list of ordering tokens (possibly empty).
  if ordering_token is not None:
    ordering_token = [ordering_token]
  else:
    ordering_token = []

  return gen_collective_ops.collective_all_to_all_v2(
      t,
      group_size=group_size,
      group_key=group_key,
      instance_key=instance_key,
      communication_hint=communication_hint.lower(),
      timeout_seconds=timeout,
      is_stateless=False,
      ordering_token=ordering_token,
      name=name,
  )
552
+
553
+
554
def all_to_all_v3(communicator, t, group_assignment=None, timeout_seconds=None):
  """Exchanges tensors mutually across a communicator group.

  Args:
    communicator: the resource `tf.Tensor` returned from
      `initialize_communicator`.
    t: a `tf.Tensor` whose first dimension has the size of the group;
      `t[i]` is sent to `rank i` within the group.
    group_assignment: Optional int32 `tf.Tensor` with shape [num_groups,
      num_ranks_per_group]. `group_assignment[i]` represents the ranks in the
      `ith` subgroup.
    timeout_seconds: completion timeout in seconds used to detect staleness;
      when the timer fires, a DeadlineExceededError is raised. This feature
      is experimental.

  Returns:
    a `tf.Tensor`. `t[i]` is sent from `rank i` within the group.
  """
  # The op requires a (possibly empty) group assignment list, not None.
  assignment = [] if group_assignment is None else group_assignment
  return gen_collective_ops.collective_all_to_all_v3(
      communicator=communicator,
      input=t,
      group_assignment=assignment,
      timeout_seconds=timeout_seconds)
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_grad.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Gradients for operators defined in control_flow_ops.py."""
17
+
18
+ from tensorflow.python.framework import dtypes
19
+ from tensorflow.python.framework import indexed_slices
20
+ from tensorflow.python.framework import ops
21
+ from tensorflow.python.framework import sparse_tensor
22
+ from tensorflow.python.framework import tensor
23
+ from tensorflow.python.ops import control_flow_ops
24
+ from tensorflow.python.ops import control_flow_util
25
+ from tensorflow.python.ops import math_ops
26
+ # go/tf-wildcard-import
27
+ # pylint: disable=wildcard-import,undefined-variable,redefined-builtin
28
+ from tensorflow.python.ops.control_flow_ops import *
29
+ # pylint: enable=wildcard-import
30
+
31
+
32
def _SwitchGrad(op, *grad):
  """Gradients for a Switch op are calculated using a Merge op.

  If the switch is a loop switch, it will be visited twice. We create
  the merge on the first visit, and update the other input of the merge
  on the second visit. A next_iteration is also added on second visit.

  Args:
    op: the forward Switch op.
    *grad: the incoming gradients for the Switch's two outputs.

  Returns:
    A pair of gradients for the Switch's two inputs (data, predicate); the
    predicate gradient is always None.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    merge_grad = grad_ctxt.grad_state.switch_map.get(op)
    if merge_grad is not None:
      # This is the second time this Switch is visited. It comes from
      # the non-exit branch of the Switch, so update the second input
      # to the Merge.
      # TODO(yuanbyu): Perform shape inference with this new input.
      if grad[1] is not None:
        # pylint: disable=protected-access
        control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
                                             enforce_shape_invariant=False)
        # pylint: enable=protected-access
      return None, None
    elif grad[0] is not None:
      # This is the first time this Switch is visited. It comes from
      # the Exit branch, which is grad[0]. grad[1] is empty at this point.
      # Use grad[0] for both inputs to merge for now, but update the second
      # input of merge when we see this Switch the second time.
      merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
      grad_ctxt.grad_state.switch_map[op] = merge_grad
      return merge_grad, None
    else:
      # This is the first time this Switch is visited. It comes from the
      # Identity branch. Such a Switch has `None` gradient for the Exit branch,
      # meaning the output is not differentiable.
      return None, None
  elif isinstance(op_ctxt, CondContext):
    zero_grad = grad[1 - op_ctxt.branch]
    # At this point, we have created zero_grad guarded by the right switch.
    # Unfortunately, we may still get None here for not trainable data types.
    if zero_grad is None:
      # For resource variables we get None always on the other branch, so bypass
      # this.
      if op.inputs[0].dtype == dtypes.resource:
        return merge(
            [grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
      return None, None
    return merge(grad, name="cond_grad")[0], None
  else:
    # No While/Cond context: build the gradient by switching each incoming
    # gradient on the original predicate and merging the two branches.
    false_grad = switch(grad[0], op.inputs[1])[0]
    true_grad = switch(grad[1], op.inputs[1])[1]
    return merge([false_grad, true_grad])[0], None


# Ref variants share the same gradient function.
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
90
+
91
+
92
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
  """Gradients for a Merge op are calculated using a Switch op.

  Args:
    op: the forward Merge op.
    grad: gradient for the Merge's value output.
    _: unused gradient for the Merge's value_index output.

  Returns:
    The gradients for the Merge's inputs (one per input, produced via Switch).
  """
  input_op = op.inputs[0].op
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = control_flow_util.GetOutputContext(input_op)
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if isinstance(op_ctxt, WhileContext):
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
    # pylint: enable=protected-access
  elif isinstance(op_ctxt, CondContext):
    pred = op_ctxt.pred
    if grad_ctxt and grad_ctxt.grad_state:
      # This Merge node is part of a cond within a loop.
      # The backprop needs to have the value of this predicate for every
      # iteration. So we must have its values accumulated in the forward, and
      # use the accumulated values as the predicate for this backprop switch.
      grad_state = grad_ctxt.grad_state
      real_pred = grad_state.history_map.get(pred.name)
      if real_pred is None:
        # Remember the value of pred for every iteration.
        grad_ctxt = grad_state.grad_context
        grad_ctxt.Exit()
        history_pred = grad_state.AddForwardAccumulator(pred)
        grad_ctxt.Enter()

        # Add the stack pop op. If pred.op is in a (outer) CondContext,
        # the stack pop will be guarded with a switch.
        real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
        grad_state.history_map[pred.name] = real_pred
      pred = real_pred
    # pylint: disable=protected-access
    return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
    # pylint: enable=protected-access
  else:
    # Generic N-way Merge: route the gradient to input i iff the forward
    # Merge chose input i (op.outputs[1] is the chosen value_index).
    num_inputs = len(op.inputs)
    cond = [math_ops.equal(op.outputs[1], i) for i in range(num_inputs)]
    # pylint: disable=protected-access
    return [
        control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
        for i in range(num_inputs)
    ]
    # pylint: enable=protected-access
138
+
139
+
140
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
  # RefMerge has the same gradient semantics as Merge; delegate.
  return _MergeGrad(op, grad, _)
143
+
144
+
145
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
  """Gradients for an exit op are calculated using an Enter op.

  Args:
    op: the forward Exit op.
    grad: incoming gradient; a `Tensor`, `IndexedSlices`, or `SparseTensor`.

  Returns:
    The gradient wrapped in an Enter op for the gradient loop, or None when
    the loop was built with `back_prop=False`.

  Raises:
    TypeError: if a second-order while-loop gradient is requested, or `grad`
      has an unsupported type.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  op_ctxt = op._get_control_flow_context()
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if not grad_ctxt.back_prop:
    # The flag `back_prop` is set by users to suppress gradient
    # computation for this loop. If the attribute `back_prop` is false,
    # no gradient computation.
    return None

  if op_ctxt.grad_state:
    raise TypeError("Second-order gradient for while loops not supported.")

  if isinstance(grad, tensor.Tensor):
    grad_ctxt.AddName(grad.name)
  else:
    if not isinstance(
        grad, (indexed_slices.IndexedSlices, sparse_tensor.SparseTensor)):
      # Bug fix: the concatenated string literals were missing a space, so the
      # message rendered as "...must be either`indexed_slices...".
      raise TypeError(f"Type {type(grad)} not supported, must be either "
                      "`indexed_slices.IndexedSlices` or `SparseTensor`.")
    # Composite gradients carry several component tensors; register each.
    grad_ctxt.AddName(grad.values.name)
    grad_ctxt.AddName(grad.indices.name)
    dense_shape = grad.dense_shape
    if dense_shape is not None:
      grad_ctxt.AddName(dense_shape.name)
  grad_ctxt.Enter()
  # pylint: disable=protected-access
  result = control_flow_ops._Enter(
      grad, grad_ctxt.name, is_constant=False,
      parallel_iterations=grad_ctxt.parallel_iterations,
      name="b_exit")
  # pylint: enable=protected-access
  grad_ctxt.loop_enters.append(result)
  grad_ctxt.Exit()
  return result


# RefExit shares the same gradient function.
ops.RegisterGradient("RefExit")(_ExitGrad)
187
+
188
+
189
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
  """A forward next_iteration is translated into a backprop identity.

  Note that the backprop next_iteration is added in switch grad.

  Args:
    _: the forward NextIteration op (unused).
    grad: the incoming gradient.

  Returns:
    `grad`, unchanged.
  """
  return grad
196
+
197
+
198
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
  # RefNextIteration has the same gradient semantics as NextIteration.
  return _NextIterationGrad(_, grad)
201
+
202
+
203
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
  """Gradients for an Enter are calculated using an Exit op.

  For loop variables, grad is the gradient so just add an exit.
  For loop invariants, we need to add an accumulator loop.

  Args:
    op: the forward Enter op.
    grad: the incoming gradient.

  Returns:
    The gradient routed out of the gradient loop (via Exit or an accumulator),
    or `grad` unchanged when no gradient loop applies.

  Raises:
    TypeError: if an accumulator is needed but `grad` is neither a `Tensor`
      nor an `IndexedSlices`.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  grad_ctxt = graph._get_control_flow_context()
  # pylint: enable=protected-access
  if grad_ctxt is None:
    return grad
  if not grad_ctxt.back_prop:
    # Skip gradient computation, if the attribute `back_prop` is false.
    return grad
  if grad_ctxt.grad_state is None:
    # Pass the gradient through if we are not in a gradient while context.
    return grad
  if op.get_attr("is_constant"):
    # Add a gradient accumulator for each loop invariant.
    if isinstance(grad, tensor.Tensor):
      result = grad_ctxt.AddBackpropAccumulator(op, grad)
    elif isinstance(grad, indexed_slices.IndexedSlices):
      result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
    else:
      # TODO(yuanbyu, lukasr): Add support for SparseTensor.
      # Bug fix: the concatenated string literals were missing a space, so the
      # message rendered as "...not supported,must be...".
      raise TypeError(f"Type {type(grad)} not supported, "
                      "must be Tensor or Indexed Slices")
  else:
    # Loop variable: route the gradient out of the gradient loop.
    result = exit(grad)
    grad_ctxt.loop_exits.append(result)
    grad_ctxt.ExitResult([result])
  return result
237
+
238
+
239
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
  # RefEnter has the same gradient semantics as Enter; delegate.
  return _EnterGrad(op, grad)
242
+
243
+
244
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
  """Stop backprop for the predicate of a while loop."""
  # The boolean loop predicate is not differentiable; no gradient flows
  # through it.
  return None
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/control_flow_util.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Utility functions for control flow.
17
+
18
+ This file is necessary to avoid cyclic dependencies between ops.py and
19
+ control_flow_ops.py.
20
+ """
21
+
22
+ import os
23
+ import traceback
24
+
25
+ from tensorflow.python import tf2
26
+ from tensorflow.python.platform import tf_logging as logging
27
+
28
# Whether control flow v2 is enabled by default: True when TF2 behavior is on
# (unless explicitly opted out via TF_ENABLE_CONTROL_FLOW_V2=0), or when any of
# the v2 opt-in environment variables below is set to a non-"0" value.
ENABLE_CONTROL_FLOW_V2 = ((tf2.enabled() and
                           os.getenv("TF_ENABLE_CONTROL_FLOW_V2") != "0") or
                          os.getenv("TF_ENABLE_CONTROL_FLOW_V2", "0") != "0" or
                          os.getenv("TF_ENABLE_COND_V2", "0") != "0" or
                          os.getenv("TF_ENABLE_WHILE_V2", "0") != "0" or
                          os.getenv("TF_ENABLE_TENSOR_ARRAY_V2", "0") != "0")
34
+
35
+
36
# TODO(b/137793122): Remove this.
def enable_control_flow_v2():  # pylint: disable=invalid-name
  """Use control flow v2.

  Do not use this symbol. This will be removed.
  """
  # Flips the module-level flag consulted by EnableControlFlowV2().
  global ENABLE_CONTROL_FLOW_V2
  ENABLE_CONTROL_FLOW_V2 = True
44
+
45
+
46
def EnableControlFlowV2(graph):
  """Returns whether control flow v2 should be used in `graph`."""
  if ENABLE_CONTROL_FLOW_V2:
    return True
  # Enable new control flow in FuncGraphs (but not legacy _FuncGraphs).
  # TODO(skyewm): do something better than hasattr without messing up imports.
  return graph.building_function and not hasattr(graph, "_captured")
52
+
53
+
54
def IsInXLAContext(op):
  """Returns True if `op` has _XlaCompile set or sits inside an XLA context."""
  try:
    if op.get_attr("_XlaCompile"):
      return True
  except ValueError:
    # The op has no _XlaCompile attribute; fall through to the context check.
    pass
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingXLAContext(ctxt) is not None
62
+
63
+
64
def InXlaContext(graph):
  """Returns True if `graph`'s current control flow context is in an XLA context."""
  ctxt = graph._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingXLAContext(ctxt) is not None
67
+
68
+
69
def GraphOrParentsInXlaContext(graph):
  """Returns True if `graph` or any graph enclosing it is in an XLA context."""
  g = graph
  while True:
    if InXlaContext(g):
      return True
    try:
      # Walk up to the enclosing FuncGraph, if any.
      g = g.outer_graph
    except AttributeError:
      # Reached a graph with no outer_graph: not in an XLA context.
      return False
76
+
77
+
78
def IsInWhileLoop(op):
  """Returns True if `op`'s control flow context is inside a while loop."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingWhileContext(ctxt) is not None
81
+
82
+
83
def IsInCond(op):
  """Returns True if `op`'s control flow context is inside a cond."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return GetContainingCondContext(ctxt) is not None
86
+
87
+
88
def IsSwitch(op):
  """Return true if `op` is a Switch."""
  return op.type in ("Switch", "RefSwitch")
91
+
92
+
93
def IsMerge(op):
  """Return true if `op` is a Merge."""
  return op.type in ("Merge", "RefMerge")
96
+
97
+
98
def IsLoopEnter(op):
  """Returns true if `op` is an Enter."""
  return op.type in ("Enter", "RefEnter")
101
+
102
+
103
def IsLoopExit(op):
  """Return true if `op` is an Exit."""
  return op.type in ("Exit", "RefExit")
106
+
107
+
108
def IsCondSwitch(op):
  """Return true if `op` is the Switch for a conditional."""
  if not IsSwitch(op):
    return False
  if not op.outputs:
    return False
  # Switch nodes are not part of the cond control flow context that they
  # represent, so consider the consumers of its outputs to determine if it is
  # cond switch or not. A switch is a cond switch iff all its consumers are in
  # cond contexts.
  is_cond_switch = True
  for o in op.outputs:
    for c in o.consumers():
      ctxt = c._get_control_flow_context()  # pylint: disable=protected-access
      if IsLoopEnter(c):
        # An Enter consumer's own context is the loop; compare against the
        # context surrounding the loop instead.
        ctxt = ctxt.outer_context
      is_cond_switch = is_cond_switch and (ctxt is not None and
                                           ctxt.IsCondContext())
  return is_cond_switch
127
+
128
+
129
def IsCondMerge(op):
  """Return true if `op` is the Merge for a conditional."""
  if not IsMerge(op):
    return False
  if not op.inputs:
    return False
  # Merge nodes are not part of the cond control flow context that they
  # represent, so consider the inputs to the merge to determine if it is
  # a cond merge or not: A merge is a cond merge iff all its inputs are in
  # cond contexts.
  is_cond_merge = True
  for i in op.inputs:
    ctxt = GetOutputContext(i.op)
    is_cond_merge = is_cond_merge and ctxt is not None and ctxt.IsCondContext()
  return is_cond_merge
144
+
145
+
146
def IsLoopSwitch(op):
  """Return true if `op` is the Switch for a while loop."""
  if not IsSwitch(op):
    return False
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return ctxt is not None and ctxt.IsWhileContext() and not IsCondSwitch(op)
152
+
153
+
154
def IsLoopMerge(op):
  """Return true if `op` is the Merge for a while loop."""
  if not IsMerge(op):
    return False
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  return ctxt is not None and ctxt.IsWhileContext() and not IsCondMerge(op)
160
+
161
+
162
def IsLoopConstantEnter(op):
  """Return true iff op is a loop invariant."""
  # Loop invariants enter the loop through an Enter op whose "is_constant"
  # attribute is set.
  return IsLoopEnter(op) and op.get_attr("is_constant")
165
+
166
+
167
def GetLoopConstantEnter(value):
  """Return the enter op if we can infer `value` to be a loop invariant."""
  # Walk backwards through ops that merely forward their first input.
  passthrough = frozenset(("Switch", "RefSwitch", "Identity", "RefIdentity"))
  op = value.op
  while op.type in passthrough:
    op = op.inputs[0].op
  if IsLoopConstantEnter(op):
    return op
  return None
174
+
175
+
176
def GetOutputContext(op):
  """Return the control flow context for the output of an op."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  # Exit nodes usually have a control flow context, except in the case where the
  # exit node was imported via import_graph_def (in which case no nodes have
  # control flow contexts).
  if ctxt is not None and IsLoopExit(op):
    # An Exit's output lives in the context surrounding the loop it exits.
    ctxt = ctxt.outer_context
  return ctxt
185
+
186
+
187
def GetContainingWhileContext(ctxt, stop_ctxt=None):
  """Returns the first ancestor WhileContext of `ctxt`.

  Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a
  while loop.

  Args:
    ctxt: ControlFlowContext
    stop_ctxt: ControlFlowContext, optional. If provided, the search will end
      if it sees stop_ctxt.

  Returns:
    `ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing
    `ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not
    `None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal.
  """
  cur = ctxt
  while cur:
    if cur.IsWhileContext() or cur == stop_ctxt:
      return cur
    cur = cur.outer_context
  return None
207
+
208
+
209
def GetContainingXLAContext(ctxt):
  """Returns the first ancestor XLAContext of `ctxt`.

  Returns `ctxt` if `ctxt` is a XLAContext, or None if `ctxt` is not in a
  while loop.

  Args:
    ctxt: ControlFlowContext

  Returns:
    `ctxt` if `ctxt` is a XLAContext, the most nested XLAContext containing
    `ctxt`, or None if `ctxt` is not in a while loop.
  """
  cur = ctxt
  while cur:
    if cur.IsXLAContext():
      return cur
    cur = cur.outer_context
  return None
226
+
227
+
228
def GetContainingCondContext(ctxt):
  """Returns the first ancestor CondContext of `ctxt`.

  Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond.

  Args:
    ctxt: ControlFlowContext

  Returns:
    `ctxt` if `ctxt` is a CondContext, the most nested CondContext containing
    `ctxt`, or None if `ctxt` is not in a cond.
  """
  cur = ctxt
  while cur:
    if cur.IsCondContext():
      return cur
    cur = cur.outer_context
  return None
244
+
245
+
246
def IsContainingContext(ctxt, maybe_containing_ctxt):
  """Returns true if `maybe_containing_ctxt` is or contains `ctxt`."""
  cur = ctxt
  while True:
    if cur is maybe_containing_ctxt:
      return True
    if cur is None:
      # Walked past the outermost context without a match.
      return False
    cur = cur.outer_context
252
+
253
+
254
def OpInContext(op, ctxt):
  """Returns True if `ctxt` is or contains `op`'s control flow context."""
  return IsContainingContext(op._get_control_flow_context(), ctxt)  # pylint: disable=protected-access
256
+
257
+
258
def TensorInContext(tensor, ctxt):
  """Returns True if `ctxt` is or contains the context of `tensor`'s op."""
  return OpInContext(tensor.op, ctxt)
260
+
261
+
262
def CheckInputFromValidContext(op, input_op):
  """Checks whether `input_op` can be used from `op`s context.

  Conceptually, only inputs from op's while context or any ancestor while
  context (including outside of any context) are valid. In practice, there are
  many other edge cases as well.

  Args:
    op: Operation
    input_op: Operation

  Raises:
    ValueError: if input_op is from an invalid context.
  """
  op_ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  input_ctxt = GetOutputContext(input_op)
  valid = False

  if not input_ctxt:
    # input_op isn't in a control flow context.
    valid = True
  elif op_ctxt is input_ctxt:
    # input_op is in the same context as op.
    valid = True
  else:
    while_ctxt = GetContainingWhileContext(op_ctxt)
    input_while_ctxt = GetContainingWhileContext(input_ctxt)

    if while_ctxt is None:
      if input_while_ctxt is None:
        # Neither op nor input_op is in a while loop, but one or both are in
        # conds. We allow this, although execution will fail if the branch
        # corresponding to input_op's cond context isn't taken.
        valid = True
      # Invalid if op isn't in a while loop and input_op is. Unless...
      if IsLoopEnter(op):
        # WhileContext._BuildLoop clears context for Enter nodes.
        valid = True
      if IsSwitch(op):
        # CondContext.AddValue clears context for Switch nodes.
        valid = True
    elif IsContainingContext(while_ctxt, input_while_ctxt):
      # input_op is in a while loop which contains op's while loop (or not in a
      # while loop at all).
      valid = True
    elif (while_ctxt.grad_state and
          IsContainingContext(while_ctxt.grad_state.forward_context,
                              input_while_ctxt)):
      # op is in a gradient context and input_op is in the associated forward
      # pass context or an ancestor thereof. This case is needed to build while
      # loop gradients.
      # NOTE(skyewm): we theoretically also need this case for custom gradient
      # functions that close over tensors from ancestor contexts, but I haven't
      # verified this.
      valid = True
    elif (while_ctxt.grad_state and
          while_ctxt.grad_state.forward_context is
          input_while_ctxt._outer_context):  # pylint: disable=protected-access
      # op is in a gradient context and input_op is in a child of the associated
      # forward pass context. This case is needed for the gradients of while
      # loops with conds.
      valid = True
    elif (input_while_ctxt.grad_state and
          input_while_ctxt.grad_state.forward_context is while_ctxt):
      # input_op is in the gradient context of op's context. This case is needed
      # when the gradient of a while loop gradient is requested (this will
      # eventually fail unless there is a stop_gradient() or similar).
      valid = True
    elif (input_while_ctxt.grad_state and
          input_ctxt.grad_state.forward_context.grad_state and
          input_ctxt.grad_state.forward_context.grad_state.forward_context is
          while_ctxt):
      # NOTE(review): this arm guards on `input_while_ctxt.grad_state` but then
      # dereferences `input_ctxt.grad_state` — presumably the two coincide here;
      # confirm against the original intent before relying on this branch.
      # input_op is in the grad grad context of op's context. This case is
      # needed when the gradient of a while loop gradient is requested (this
      # will eventually fail unless there is a stop_gradient() or similar).
      valid = True

  if not valid:
    if while_ctxt:
      error_msg = (
          f"Cannot use '{input_op.name}' as input to '{op.name}' because they "
          "are in different while loops.")
    else:
      error_msg = (
          f"Cannot use '{input_op.name}' as input to '{op.name}' because "
          f"'{input_op.name}' is in a while loop.")

    # Log the error message plus the relevant stack traces. The stacks may be
    # useful for debugging this error, but we don't want to raise an
    # unreadable exception.
    log_msg = error_msg
    log_msg += "\n\n%s while context: %s" % (op.name, while_ctxt)
    log_msg += "\n%s while context: %s" % (input_op.name, input_while_ctxt)
    log_msg += "\n\nTraceback for %s:\n%s\nTraceback for %s:\n%s\n" % (
        op.name, "".join(traceback.format_list(op.traceback)),
        input_op.name, "".join(traceback.format_list(input_op.traceback)))
    logging.info(log_msg)
    raise ValueError(error_msg + " See info log for more details.")
360
+
361
+
362
def GetWhileContext(op):
  """Get the WhileContext to which this op belongs."""
  ctxt = op._get_control_flow_context()  # pylint: disable=protected-access
  if not ctxt:
    # No control flow context: nothing to resolve.
    return ctxt
  return ctxt.GetWhileContext()
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/data_flow_ops.py ADDED
@@ -0,0 +1,2518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ #==============================================================================
15
+ """Data Flow Operations."""
16
+ # pylint: disable=g-bad-name
17
+ import functools
18
+ import hashlib
19
+ import threading
20
+
21
+ from tensorflow.python.eager import context
22
+ from tensorflow.python.framework import dtypes as _dtypes
23
+ from tensorflow.python.framework import indexed_slices
24
+ from tensorflow.python.framework import ops
25
+ from tensorflow.python.framework import random_seed
26
+ from tensorflow.python.framework import tensor_shape
27
+ from tensorflow.python.framework import tensor_util
28
+ from tensorflow.python.lib.io import python_io
29
+ from tensorflow.python.ops import array_ops
30
+ from tensorflow.python.ops import array_ops_stack
31
+ from tensorflow.python.ops import control_flow_ops
32
+ from tensorflow.python.ops import gen_data_flow_ops
33
+ from tensorflow.python.ops import math_ops
34
+ from tensorflow.python.ops import resource_variable_ops
35
+ # go/tf-wildcard-import
36
+ # pylint: disable=wildcard-import
37
+ from tensorflow.python.ops.gen_data_flow_ops import *
38
+ from tensorflow.python.util import deprecation
39
+ from tensorflow.python.util.compat import collections_abc
40
+ from tensorflow.python.util.tf_export import tf_export
41
+
42
+ # pylint: enable=wildcard-import
43
+
44
+
45
+ def _as_type_list(dtypes):
46
+ """Convert dtypes to a list of types."""
47
+ assert dtypes is not None
48
+ if not (isinstance(dtypes, list) or isinstance(dtypes, tuple)):
49
+ # We have a single type.
50
+ return [dtypes]
51
+ else:
52
+ # We have a list or tuple of types.
53
+ return list(dtypes)
54
+
55
+
56
+ def _as_shape_list(shapes,
57
+ dtypes,
58
+ unknown_dim_allowed=False,
59
+ unknown_rank_allowed=False):
60
+ """Convert shapes to a list of tuples of int (or None)."""
61
+ del dtypes
62
+ if unknown_dim_allowed:
63
+ if (not isinstance(shapes, collections_abc.Sequence) or not shapes or
64
+ any(shape is None or isinstance(shape, int) for shape in shapes)):
65
+ raise ValueError(
66
+ "When providing partial shapes, a list of shapes must be provided.")
67
+ if shapes is None:
68
+ return None
69
+ if isinstance(shapes, tensor_shape.TensorShape):
70
+ shapes = [shapes]
71
+ if not isinstance(shapes, (tuple, list)):
72
+ raise TypeError(
73
+ "Shapes must be a TensorShape or a list or tuple of TensorShapes, "
74
+ f"got {type(shapes)} instead.")
75
+ if all(shape is None or isinstance(shape, int) for shape in shapes):
76
+ # We have a single shape.
77
+ shapes = [shapes]
78
+ shapes = [tensor_shape.as_shape(shape) for shape in shapes]
79
+ if not unknown_dim_allowed:
80
+ if any(not shape.is_fully_defined() for shape in shapes):
81
+ raise ValueError(f"All shapes must be fully defined: {shapes}")
82
+ if not unknown_rank_allowed:
83
+ if any(shape.dims is None for shape in shapes):
84
+ raise ValueError(f"All shapes must have a defined rank: {shapes}")
85
+
86
+ return shapes
87
+
88
+
89
+ def _as_name_list(names, dtypes):
90
+ if names is None:
91
+ return None
92
+ if not isinstance(names, (list, tuple)):
93
+ names = [names]
94
+ if len(names) != len(dtypes):
95
+ raise ValueError("List of names must have the same length as the list "
96
+ f"of dtypes, received len(names)={len(names)},"
97
+ f"len(dtypes)={len(dtypes)}")
98
+ return list(names)
99
+
100
+
101
+ def _shape_common(s1, s2):
102
+ """The greatest lower bound (ordered by specificity) TensorShape."""
103
+ s1 = tensor_shape.TensorShape(s1)
104
+ s2 = tensor_shape.TensorShape(s2)
105
+ if s1.ndims is None or s2.ndims is None or s1.ndims != s2.ndims:
106
+ return tensor_shape.unknown_shape()
107
+ d = [
108
+ d1 if d1 is not None and d1 == d2 else None
109
+ for (d1, d2) in zip(s1.as_list(), s2.as_list())
110
+ ]
111
+ return tensor_shape.TensorShape(d)
112
+
113
+
114
+ # pylint: disable=protected-access
115
+ @tf_export("queue.QueueBase",
116
+ v1=["queue.QueueBase", "io.QueueBase", "QueueBase"])
117
+ @deprecation.deprecated_endpoints(["io.QueueBase", "QueueBase"])
118
+ class QueueBase:
119
+ """Base class for queue implementations.
120
+
121
+ A queue is a TensorFlow data structure that stores tensors across
122
+ multiple steps, and exposes operations that enqueue and dequeue
123
+ tensors.
124
+
125
+ Each queue element is a tuple of one or more tensors, where each
126
+ tuple component has a static dtype, and may have a static shape. The
127
+ queue implementations support versions of enqueue and dequeue that
128
+ handle single elements, versions that support enqueuing and
129
+ dequeuing a batch of elements at once.
130
+
131
+ See `tf.queue.FIFOQueue` and
132
+ `tf.queue.RandomShuffleQueue` for concrete
133
+ implementations of this class, and instructions on how to create
134
+ them.
135
+ """
136
+
137
+ def __init__(self, dtypes, shapes, names, queue_ref):
138
+ """Constructs a queue object from a queue reference.
139
+
140
+ The two optional lists, `shapes` and `names`, must be of the same length
141
+ as `dtypes` if provided. The values at a given index `i` indicate the
142
+ shape and name to use for the corresponding queue component in `dtypes`.
143
+
144
+ Args:
145
+ dtypes: A list of types. The length of dtypes must equal the number
146
+ of tensors in each element.
147
+ shapes: Constraints on the shapes of tensors in an element:
148
+ A list of shape tuples or None. This list is the same length
149
+ as dtypes. If the shape of any tensors in the element are constrained,
150
+ all must be; shapes can be None if the shapes should not be constrained.
151
+ names: Optional list of names. If provided, the `enqueue()` and
152
+ `dequeue()` methods will use dictionaries with these names as keys.
153
+ Must be None or a list or tuple of the same length as `dtypes`.
154
+ queue_ref: The queue reference, i.e. the output of the queue op.
155
+
156
+ Raises:
157
+ ValueError: If one of the arguments is invalid.
158
+ """
159
+ self._dtypes = dtypes
160
+ if shapes is not None:
161
+ if len(shapes) != len(dtypes):
162
+ raise ValueError("Queue shapes must have the same length as dtypes, "
163
+ f"received len(shapes)={len(shapes)}, "
164
+ f"len(dtypes)={len(dtypes)}")
165
+ self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
166
+ else:
167
+ self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
168
+ if names is not None:
169
+ if len(names) != len(dtypes):
170
+ raise ValueError("Queue names must have the same length as dtypes,"
171
+ f"received len(names)={len(names)},"
172
+ f"len {len(dtypes)}")
173
+ self._names = names
174
+ else:
175
+ self._names = None
176
+ self._queue_ref = queue_ref
177
+ if isinstance(queue_ref, ops.EagerTensor):
178
+ if context.context().scope_name:
179
+ self._name = context.context().scope_name
180
+ else:
181
+ self._name = "Empty"
182
+ self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
183
+ queue_ref, None)
184
+ else:
185
+ self._name = self._queue_ref.op.name.split("/")[-1]
186
+
187
+ @staticmethod
188
+ def from_list(index, queues):
189
+ """Create a queue using the queue reference from `queues[index]`.
190
+
191
+ Args:
192
+ index: An integer scalar tensor that determines the input that gets
193
+ selected.
194
+ queues: A list of `QueueBase` objects.
195
+
196
+ Returns:
197
+ A `QueueBase` object.
198
+
199
+ Raises:
200
+ TypeError: When `queues` is not a list of `QueueBase` objects,
201
+ or when the data types of `queues` are not all the same.
202
+ """
203
+ if ((not queues) or (not isinstance(queues, list)) or
204
+ (not all(isinstance(x, QueueBase) for x in queues))):
205
+ raise TypeError("A list of queues expected")
206
+
207
+ dtypes = queues[0].dtypes
208
+ if not all(dtypes == q.dtypes for q in queues[1:]):
209
+ raise TypeError("Queues do not have matching component dtypes.")
210
+
211
+ names = queues[0].names
212
+ if not all(names == q.names for q in queues[1:]):
213
+ raise TypeError("Queues do not have matching component names.")
214
+
215
+ queue_shapes = [q.shapes for q in queues]
216
+ reduced_shapes = [
217
+ functools.reduce(_shape_common, s) for s in zip(*queue_shapes)
218
+ ]
219
+
220
+ queue_refs = array_ops_stack.stack([x.queue_ref for x in queues])
221
+ selected_queue = array_ops.gather(queue_refs, index)
222
+ return QueueBase(
223
+ dtypes=dtypes,
224
+ shapes=reduced_shapes,
225
+ names=names,
226
+ queue_ref=selected_queue)
227
+
228
+ @property
229
+ def queue_ref(self):
230
+ """The underlying queue reference."""
231
+ return self._queue_ref
232
+
233
+ @property
234
+ def name(self):
235
+ """The name of the underlying queue."""
236
+ if context.executing_eagerly():
237
+ return self._name
238
+ return self._queue_ref.op.name
239
+
240
+ @property
241
+ def dtypes(self):
242
+ """The list of dtypes for each component of a queue element."""
243
+ return self._dtypes
244
+
245
+ @property
246
+ def shapes(self):
247
+ """The list of shapes for each component of a queue element."""
248
+ return self._shapes
249
+
250
+ @property
251
+ def names(self):
252
+ """The list of names for each component of a queue element."""
253
+ return self._names
254
+
255
+ def _check_enqueue_dtypes(self, vals):
256
+ """Validate and convert `vals` to a list of `Tensor`s.
257
+
258
+ The `vals` argument can be a Tensor, a list or tuple of tensors, or a
259
+ dictionary with tensor values.
260
+
261
+ If it is a dictionary, the queue must have been constructed with a
262
+ `names` attribute and the dictionary keys must match the queue names.
263
+ If the queue was constructed with a `names` attribute, `vals` must
264
+ be a dictionary.
265
+
266
+ Args:
267
+ vals: A tensor, a list or tuple of tensors, or a dictionary..
268
+
269
+ Returns:
270
+ A list of `Tensor` objects.
271
+
272
+ Raises:
273
+ ValueError: If `vals` is invalid.
274
+ """
275
+ if isinstance(vals, dict):
276
+ if not self._names:
277
+ raise ValueError("Queue must have names to enqueue a dictionary")
278
+ if sorted(self._names, key=str) != sorted(vals.keys(), key=str):
279
+ raise ValueError("Keys in dictionary to enqueue do not match "
280
+ f"names of Queue. Dictionary: {sorted(vals.keys())},"
281
+ f"Queue: {sorted(self._names)}")
282
+ # The order of values in `self._names` indicates the order in which the
283
+ # tensors in the dictionary `vals` must be listed.
284
+ vals = [vals[k] for k in self._names]
285
+ else:
286
+ if self._names:
287
+ raise ValueError("You must enqueue a dictionary in a Queue with names")
288
+ if not isinstance(vals, (list, tuple)):
289
+ vals = [vals]
290
+
291
+ tensors = []
292
+ for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):
293
+ tensors.append(
294
+ ops.convert_to_tensor(val, dtype=dtype, name="component_%d" % i))
295
+
296
+ return tensors
297
+
298
+ def _scope_vals(self, vals):
299
+ """Return a list of values to pass to `name_scope()`.
300
+
301
+ Args:
302
+ vals: A tensor, a list or tuple of tensors, or a dictionary.
303
+
304
+ Returns:
305
+ The values in vals as a list.
306
+ """
307
+ if isinstance(vals, (list, tuple)):
308
+ return vals
309
+ elif isinstance(vals, dict):
310
+ return vals.values()
311
+ else:
312
+ return [vals]
313
+
314
+ def enqueue(self, vals, name=None):
315
+ """Enqueues one element to this queue.
316
+
317
+ If the queue is full when this operation executes, it will block
318
+ until the element has been enqueued.
319
+
320
+ At runtime, this operation may raise an error if the queue is
321
+ `tf.QueueBase.close` before or during its execution. If the
322
+ queue is closed before this operation runs,
323
+ `tf.errors.CancelledError` will be raised. If this operation is
324
+ blocked, and either (i) the queue is closed by a close operation
325
+ with `cancel_pending_enqueues=True`, or (ii) the session is
326
+ `tf.Session.close`,
327
+ `tf.errors.CancelledError` will be raised.
328
+
329
+ Args:
330
+ vals: A tensor, a list or tuple of tensors, or a dictionary containing
331
+ the values to enqueue.
332
+ name: A name for the operation (optional).
333
+
334
+ Returns:
335
+ The operation that enqueues a new tuple of tensors to the queue.
336
+ """
337
+ with ops.name_scope(name, "%s_enqueue" % self._name,
338
+ self._scope_vals(vals)) as scope:
339
+ vals = self._check_enqueue_dtypes(vals)
340
+
341
+ # NOTE(mrry): Not using a shape function because we need access to
342
+ # the `QueueBase` object.
343
+ for val, shape in zip(vals, self._shapes):
344
+ val.get_shape().assert_is_compatible_with(shape)
345
+
346
+ if self._queue_ref.dtype == _dtypes.resource:
347
+ return gen_data_flow_ops.queue_enqueue_v2(
348
+ self._queue_ref, vals, name=scope)
349
+ else:
350
+ return gen_data_flow_ops.queue_enqueue(
351
+ self._queue_ref, vals, name=scope)
352
+
353
+ def enqueue_many(self, vals, name=None):
354
+ """Enqueues zero or more elements to this queue.
355
+
356
+ This operation slices each component tensor along the 0th dimension to
357
+ make multiple queue elements. All of the tensors in `vals` must have the
358
+ same size in the 0th dimension.
359
+
360
+ If the queue is full when this operation executes, it will block
361
+ until all of the elements have been enqueued.
362
+
363
+ At runtime, this operation may raise an error if the queue is
364
+ `tf.QueueBase.close` before or during its execution. If the
365
+ queue is closed before this operation runs,
366
+ `tf.errors.CancelledError` will be raised. If this operation is
367
+ blocked, and either (i) the queue is closed by a close operation
368
+ with `cancel_pending_enqueues=True`, or (ii) the session is
369
+ `tf.Session.close`,
370
+ `tf.errors.CancelledError` will be raised.
371
+
372
+ Args:
373
+ vals: A tensor, a list or tuple of tensors, or a dictionary
374
+ from which the queue elements are taken.
375
+ name: A name for the operation (optional).
376
+
377
+ Returns:
378
+ The operation that enqueues a batch of tuples of tensors to the queue.
379
+ """
380
+ with ops.name_scope(name, "%s_EnqueueMany" % self._name,
381
+ self._scope_vals(vals)) as scope:
382
+ vals = self._check_enqueue_dtypes(vals)
383
+
384
+ # NOTE(mrry): Not using a shape function because we need access to
385
+ # the `QueueBase` object.
386
+ # NOTE(fchollet): the code that follow is verbose because it needs to be
387
+ # compatible with both TF v1 TensorShape behavior and TF v2 behavior.
388
+ batch_dim = tensor_shape.dimension_value(
389
+ vals[0].get_shape().with_rank_at_least(1)[0])
390
+ batch_dim = tensor_shape.Dimension(batch_dim)
391
+ for val, shape in zip(vals, self._shapes):
392
+ val_batch_dim = tensor_shape.dimension_value(
393
+ val.get_shape().with_rank_at_least(1)[0])
394
+ val_batch_dim = tensor_shape.Dimension(val_batch_dim)
395
+ batch_dim = batch_dim.merge_with(val_batch_dim)
396
+ val.get_shape()[1:].assert_is_compatible_with(shape)
397
+
398
+ return gen_data_flow_ops.queue_enqueue_many_v2(
399
+ self._queue_ref, vals, name=scope)
400
+
401
+ def _dequeue_return_value(self, tensors):
402
+ """Return the value to return from a dequeue op.
403
+
404
+ If the queue has names, return a dictionary with the
405
+ names as keys. Otherwise return either a single tensor
406
+ or a list of tensors depending on the length of `tensors`.
407
+
408
+ Args:
409
+ tensors: List of tensors from the dequeue op.
410
+
411
+ Returns:
412
+ A single tensor, a list of tensors, or a dictionary
413
+ of tensors.
414
+ """
415
+ if self._names:
416
+ # The returned values in `tensors` are in the same order as
417
+ # the names in `self._names`.
418
+ return {n: tensors[i] for i, n in enumerate(self._names)}
419
+ elif len(tensors) == 1:
420
+ return tensors[0]
421
+ else:
422
+ return tensors
423
+
424
+ def dequeue(self, name=None):
425
+ """Dequeues one element from this queue.
426
+
427
+ If the queue is empty when this operation executes, it will block
428
+ until there is an element to dequeue.
429
+
430
+ At runtime, this operation may raise an error if the queue is
431
+ `tf.QueueBase.close` before or during its execution. If the
432
+ queue is closed, the queue is empty, and there are no pending
433
+ enqueue operations that can fulfill this request,
434
+ `tf.errors.OutOfRangeError` will be raised. If the session is
435
+ `tf.Session.close`,
436
+ `tf.errors.CancelledError` will be raised.
437
+
438
+ Args:
439
+ name: A name for the operation (optional).
440
+
441
+ Returns:
442
+ The tuple of tensors that was dequeued.
443
+ """
444
+ if name is None:
445
+ name = "%s_Dequeue" % self._name
446
+ if self._queue_ref.dtype == _dtypes.resource:
447
+ ret = gen_data_flow_ops.queue_dequeue_v2(
448
+ self._queue_ref, self._dtypes, name=name)
449
+ else:
450
+ ret = gen_data_flow_ops.queue_dequeue(
451
+ self._queue_ref, self._dtypes, name=name)
452
+
453
+ # NOTE(mrry): Not using a shape function because we need access to
454
+ # the `QueueBase` object.
455
+ if not context.executing_eagerly():
456
+ op = ret[0].op
457
+ for output, shape in zip(op.values(), self._shapes):
458
+ output.set_shape(shape)
459
+
460
+ return self._dequeue_return_value(ret)
461
+
462
+ def dequeue_many(self, n, name=None):
463
+ """Dequeues and concatenates `n` elements from this queue.
464
+
465
+ This operation concatenates queue-element component tensors along
466
+ the 0th dimension to make a single component tensor. All of the
467
+ components in the dequeued tuple will have size `n` in the 0th dimension.
468
+
469
+ If the queue is closed and there are less than `n` elements left, then an
470
+ `OutOfRange` exception is raised.
471
+
472
+ At runtime, this operation may raise an error if the queue is
473
+ `tf.QueueBase.close` before or during its execution. If the
474
+ queue is closed, the queue contains fewer than `n` elements, and
475
+ there are no pending enqueue operations that can fulfill this
476
+ request, `tf.errors.OutOfRangeError` will be raised. If the
477
+ session is `tf.Session.close`,
478
+ `tf.errors.CancelledError` will be raised.
479
+
480
+ Args:
481
+ n: A scalar `Tensor` containing the number of elements to dequeue.
482
+ name: A name for the operation (optional).
483
+
484
+ Returns:
485
+ The list of concatenated tensors that was dequeued.
486
+ """
487
+ if name is None:
488
+ name = "%s_DequeueMany" % self._name
489
+
490
+ ret = gen_data_flow_ops.queue_dequeue_many_v2(
491
+ self._queue_ref, n=n, component_types=self._dtypes, name=name)
492
+
493
+ # NOTE(mrry): Not using a shape function because we need access to
494
+ # the Queue object.
495
+ if not context.executing_eagerly():
496
+ op = ret[0].op
497
+ batch_dim = tensor_shape.Dimension(
498
+ tensor_util.constant_value(op.inputs[1]))
499
+ for output, shape in zip(op.values(), self._shapes):
500
+ output.set_shape(
501
+ tensor_shape.TensorShape([batch_dim]).concatenate(shape))
502
+
503
+ return self._dequeue_return_value(ret)
504
+
505
+ def dequeue_up_to(self, n, name=None):
506
+ """Dequeues and concatenates `n` elements from this queue.
507
+
508
+ **Note** This operation is not supported by all queues. If a queue does not
509
+ support DequeueUpTo, then a `tf.errors.UnimplementedError` is raised.
510
+
511
+ This operation concatenates queue-element component tensors along
512
+ the 0th dimension to make a single component tensor. If the queue
513
+ has not been closed, all of the components in the dequeued tuple
514
+ will have size `n` in the 0th dimension.
515
+
516
+ If the queue is closed and there are more than `0` but fewer than
517
+ `n` elements remaining, then instead of raising a
518
+ `tf.errors.OutOfRangeError` like `tf.QueueBase.dequeue_many`,
519
+ less than `n` elements are returned immediately. If the queue is
520
+ closed and there are `0` elements left in the queue, then a
521
+ `tf.errors.OutOfRangeError` is raised just like in `dequeue_many`.
522
+ Otherwise the behavior is identical to `dequeue_many`.
523
+
524
+ Args:
525
+ n: A scalar `Tensor` containing the number of elements to dequeue.
526
+ name: A name for the operation (optional).
527
+
528
+ Returns:
529
+ The tuple of concatenated tensors that was dequeued.
530
+ """
531
+ if name is None:
532
+ name = "%s_DequeueUpTo" % self._name
533
+
534
+ ret = gen_data_flow_ops.queue_dequeue_up_to_v2(
535
+ self._queue_ref, n=n, component_types=self._dtypes, name=name)
536
+
537
+ # NOTE(mrry): Not using a shape function because we need access to
538
+ # the Queue object.
539
+ if not context.executing_eagerly():
540
+ op = ret[0].op
541
+ for output, shape in zip(op.values(), self._shapes):
542
+ output.set_shape(tensor_shape.TensorShape([None]).concatenate(shape))
543
+
544
+ return self._dequeue_return_value(ret)
545
+
546
+ def close(self, cancel_pending_enqueues=False, name=None):
547
+ """Closes this queue.
548
+
549
+ This operation signals that no more elements will be enqueued in
550
+ the given queue. Subsequent `enqueue` and `enqueue_many`
551
+ operations will fail. Subsequent `dequeue` and `dequeue_many`
552
+ operations will continue to succeed if sufficient elements remain
553
+ in the queue. Subsequently dequeue and dequeue_many operations
554
+ that would otherwise block waiting for more elements (if close
555
+ hadn't been called) will now fail immediately.
556
+
557
+ If `cancel_pending_enqueues` is `True`, all pending requests will also
558
+ be canceled.
559
+
560
+ Args:
561
+ cancel_pending_enqueues: (Optional.) A boolean, defaulting to
562
+ `False` (described above).
563
+ name: A name for the operation (optional).
564
+
565
+ Returns:
566
+ The operation that closes the queue.
567
+ """
568
+ if name is None:
569
+ name = "%s_Close" % self._name
570
+ if self._queue_ref.dtype == _dtypes.resource:
571
+ return gen_data_flow_ops.queue_close_v2(
572
+ self._queue_ref,
573
+ cancel_pending_enqueues=cancel_pending_enqueues,
574
+ name=name)
575
+ else:
576
+ return gen_data_flow_ops.queue_close(
577
+ self._queue_ref,
578
+ cancel_pending_enqueues=cancel_pending_enqueues,
579
+ name=name)
580
+
581
+ def is_closed(self, name=None):
582
+ """Returns true if queue is closed.
583
+
584
+ This operation returns true if the queue is closed and false if the queue
585
+ is open.
586
+
587
+ Args:
588
+ name: A name for the operation (optional).
589
+
590
+ Returns:
591
+ True if the queue is closed and false if the queue is open.
592
+ """
593
+ if name is None:
594
+ name = "%s_Is_Closed" % self._name
595
+ if self._queue_ref.dtype == _dtypes.resource:
596
+ return gen_data_flow_ops.queue_is_closed_v2(self._queue_ref, name=name)
597
+ else:
598
+ return gen_data_flow_ops.queue_is_closed_(self._queue_ref, name=name)
599
+
600
+ def size(self, name=None):
601
+ """Compute the number of elements in this queue.
602
+
603
+ Args:
604
+ name: A name for the operation (optional).
605
+
606
+ Returns:
607
+ A scalar tensor containing the number of elements in this queue.
608
+ """
609
+ if name is None:
610
+ name = "%s_Size" % self._name
611
+ if self._queue_ref.dtype == _dtypes.resource:
612
+ return gen_data_flow_ops.queue_size_v2(self._queue_ref, name=name)
613
+ else:
614
+ return gen_data_flow_ops.queue_size(self._queue_ref, name=name)
615
+
616
+ def _shared_name(shared_name):
617
+ if context.executing_eagerly():
618
+ return str(ops.uid())
619
+ return shared_name
620
+
621
+
622
+ @tf_export(
623
+ "queue.RandomShuffleQueue",
624
+ v1=["queue.RandomShuffleQueue",
625
+ "io.RandomShuffleQueue", "RandomShuffleQueue"])
626
+ @deprecation.deprecated_endpoints(
627
+ ["io.RandomShuffleQueue", "RandomShuffleQueue"])
628
+ class RandomShuffleQueue(QueueBase):
629
+ """A queue implementation that dequeues elements in a random order.
630
+
631
+ See `tf.queue.QueueBase` for a description of the methods on
632
+ this class.
633
+ """
634
+
635
+ def __init__(self,
636
+ capacity,
637
+ min_after_dequeue,
638
+ dtypes,
639
+ shapes=None,
640
+ names=None,
641
+ seed=None,
642
+ shared_name=None,
643
+ name="random_shuffle_queue"):
644
+ """Create a queue that dequeues elements in a random order.
645
+
646
+ A `RandomShuffleQueue` has bounded capacity; supports multiple
647
+ concurrent producers and consumers; and provides exactly-once
648
+ delivery.
649
+
650
+ A `RandomShuffleQueue` holds a list of up to `capacity`
651
+ elements. Each element is a fixed-length tuple of tensors whose
652
+ dtypes are described by `dtypes`, and whose shapes are optionally
653
+ described by the `shapes` argument.
654
+
655
+ If the `shapes` argument is specified, each component of a queue
656
+ element must have the respective fixed shape. If it is
657
+ unspecified, different queue elements may have different shapes,
658
+ but the use of `dequeue_many` is disallowed.
659
+
660
+ The `min_after_dequeue` argument allows the caller to specify a
661
+ minimum number of elements that will remain in the queue after a
662
+ `dequeue` or `dequeue_many` operation completes, to ensure a
663
+ minimum level of mixing of elements. This invariant is maintained
664
+ by blocking those operations until sufficient elements have been
665
+ enqueued. The `min_after_dequeue` argument is ignored after the
666
+ queue has been closed.
667
+
668
+ Args:
669
+ capacity: An integer. The upper bound on the number of elements
670
+ that may be stored in this queue.
671
+ min_after_dequeue: An integer (described above).
672
+ dtypes: A list of `DType` objects. The length of `dtypes` must equal
673
+ the number of tensors in each queue element.
674
+ shapes: (Optional.) A list of fully-defined `TensorShape` objects
675
+ with the same length as `dtypes`, or `None`.
676
+ names: (Optional.) A list of string naming the components in the queue
677
+ with the same length as `dtypes`, or `None`. If specified the dequeue
678
+ methods return a dictionary with the names as keys.
679
+ seed: A Python integer. Used to create a random seed. See
680
+ `tf.compat.v1.set_random_seed`
681
+ for behavior.
682
+ shared_name: (Optional.) If non-empty, this queue will be shared under
683
+ the given name across multiple sessions.
684
+ name: Optional name for the queue operation.
685
+ """
686
+ dtypes = _as_type_list(dtypes)
687
+ shapes = _as_shape_list(shapes, dtypes)
688
+ names = _as_name_list(names, dtypes)
689
+ seed1, seed2 = random_seed.get_seed(seed)
690
+ if seed1 is None and seed2 is None:
691
+ seed1, seed2 = 0, 0
692
+ elif seed is None and shared_name is not None:
693
+ # This means that graph seed is provided but op seed is not provided.
694
+ # If shared_name is also provided, make seed2 depend only on the graph
695
+ # seed and shared_name. (seed2 from get_seed() is generally dependent on
696
+ # the id of the last op created.)
697
+ string = (str(seed1) + shared_name).encode("utf-8")
698
+ seed2 = int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
699
+ queue_ref = gen_data_flow_ops.random_shuffle_queue_v2(
700
+ component_types=dtypes,
701
+ shapes=shapes,
702
+ capacity=capacity,
703
+ min_after_dequeue=min_after_dequeue,
704
+ seed=seed1,
705
+ seed2=seed2,
706
+ shared_name=_shared_name(shared_name),
707
+ name=name)
708
+
709
+ super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref)
710
+
711
+
712
@tf_export("queue.FIFOQueue", v1=["queue.FIFOQueue", "FIFOQueue"])
@deprecation.deprecated_endpoints("FIFOQueue")
class FIFOQueue(QueueBase):
  """A queue implementation that dequeues elements in first-in first-out order.

  See `tf.queue.QueueBase` for a description of the methods on
  this class.
  """

  def __init__(self,
               capacity,
               dtypes,
               shapes=None,
               names=None,
               shared_name=None,
               name="fifo_queue"):
    """Creates a queue that dequeues elements in a first-in first-out order.

    A `FIFOQueue` holds up to `capacity` elements and provides exactly-once
    delivery to any number of concurrent producers and consumers. Every
    element is a fixed-length tuple of tensors whose dtypes are given by
    `dtypes` and whose shapes may optionally be constrained by `shapes`.

    When `shapes` is supplied, every component of each enqueued element
    must match the corresponding fixed shape. When it is omitted, elements
    may differ in shape from one another, but `dequeue_many` is disallowed.

    Args:
      capacity: An integer. The upper bound on the number of elements that
        may be stored in this queue.
      dtypes: A list of `DType` objects, one per component of a queue
        element.
      shapes: (Optional.) A list of fully-defined `TensorShape` objects with
        the same length as `dtypes`, or `None`.
      names: (Optional.) A list of strings naming the components in the
        queue, with the same length as `dtypes`, or `None`. If given, the
        dequeue methods return a dictionary keyed by these names.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.
    """
    component_dtypes = _as_type_list(dtypes)
    component_shapes = _as_shape_list(shapes, component_dtypes)
    component_names = _as_name_list(names, component_dtypes)
    # Create the queue resource on the CPU inside an init scope so that it is
    # lifted out of any function-building graph that constructs it.
    with ops.init_scope(), ops.device("CPU"):
      queue_ref = gen_data_flow_ops.fifo_queue_v2(
          component_types=component_dtypes,
          shapes=component_shapes,
          capacity=capacity,
          shared_name=_shared_name(shared_name),
          name=name)

    super().__init__(component_dtypes, component_shapes, component_names,
                     queue_ref)
769
+
770
+
771
# TODO(allenl): If GPU-compatible queues turn out to be useful, we should
# implement GPU kernels for EnqueueMany and DequeueMany so we can make the
# public FIFOQueue GPU-compatible and remove this internal version.
class GPUCompatibleFIFOQueue(QueueBase):
  """A queue implementation that dequeues elements in first-in first-out order.

  GPUCompatibleFIFOQueue is like FIFOQueue, but the queue resource may be placed
  either on a CPU or on a GPU. It is not cross-device: enqueues and dequeues
  will be colocated with the queue resource. GPUCompatibleFIFOQueue only
  supports enqueue and dequeue at the moment, not enqueue_many or dequeue_many.

  See `tf.queue.QueueBase` for a description of the methods on this class.
  """

  def __init__(self,
               capacity,
               dtypes,
               shapes=None,
               names=None,
               shared_name=None,
               name="fifo_queue"):
    """Creates a GPU-compatible queue that dequeues in first-in first-out order.

    A `GPUCompatibleFIFOQueue` has bounded capacity; supports multiple
    concurrent producers and consumers; and provides exactly-once delivery.
    Unlike `FIFOQueue`, the queue resource is not pinned to the CPU, so it
    may be placed on a GPU, and enqueue/dequeue ops are colocated with the
    resource. Only `enqueue` and `dequeue` are supported; `enqueue_many`
    and `dequeue_many` raise `NotImplementedError`.

    If the `shapes` argument is specified, each component of a queue
    element must have the respective fixed shape. If it is unspecified,
    different queue elements may have different shapes.

    Args:
      capacity: An integer. The upper bound on the number of elements
        that may be stored in this queue.
      dtypes: A list of `DType` objects. The length of `dtypes` must equal
        the number of tensors in each queue element.
      shapes: (Optional.) A list of fully-defined `TensorShape` objects
        with the same length as `dtypes`, or `None`.
      names: (Optional.) A list of string naming the components in the queue
        with the same length as `dtypes`, or `None`. If specified the dequeue
        methods return a dictionary with the names as keys.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.
    """
    dtypes = _as_type_list(dtypes)
    shapes = _as_shape_list(shapes, dtypes)
    names = _as_name_list(names, dtypes)
    # Deliberately no ops.device("CPU") pin here (contrast with FIFOQueue):
    # the resource is created under the current device, which is what makes
    # this variant GPU-compatible.
    with ops.init_scope():
      queue_ref = gen_data_flow_ops.fifo_queue_v2(
          component_types=dtypes,
          shapes=shapes,
          capacity=capacity,
          shared_name=_shared_name(shared_name),
          name=name)

    super(GPUCompatibleFIFOQueue, self).__init__(
        dtypes, shapes, names, queue_ref)

  def enqueue_many(self, vals, name=None):
    """enqueue_many is not supported on GPUCompatibleFIFOQueue."""
    raise NotImplementedError(
        "GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, "
        "only enqueue and dequeue.")

  def dequeue_many(self, n, name=None):
    """dequeue_many is not supported on GPUCompatibleFIFOQueue."""
    raise NotImplementedError(
        "GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, "
        "only enqueue and dequeue.")
846
+
847
+
848
@tf_export(
    "queue.PaddingFIFOQueue",
    v1=["queue.PaddingFIFOQueue", "io.PaddingFIFOQueue", "PaddingFIFOQueue"])
@deprecation.deprecated_endpoints(["io.PaddingFIFOQueue", "PaddingFIFOQueue"])
class PaddingFIFOQueue(QueueBase):
  """A FIFOQueue that supports batching variable-sized tensors by padding.

  Unlike a plain `FIFOQueue`, components of a `PaddingFIFOQueue` element may
  have dynamic dimensions while still permitting `dequeue_many`; see the
  constructor for details.

  See `tf.queue.QueueBase` for a description of the methods on
  this class.
  """

  def __init__(self,
               capacity,
               dtypes,
               shapes,
               names=None,
               shared_name=None,
               name="padding_fifo_queue"):
    """Creates a queue that dequeues elements in a first-in first-out order.

    A `PaddingFIFOQueue` holds up to `capacity` elements, supports multiple
    concurrent producers and consumers, and provides exactly-once delivery.
    Each element is a fixed-length tuple of tensors whose dtypes are given
    by `dtypes` and whose shapes are given by the mandatory `shapes`
    argument.

    Every component of an enqueued element must match the corresponding
    entry of `shapes`, except that any dimension set to `None` may vary
    between elements. For such dynamic dimensions, `dequeue_many` pads
    every component with zeros up to the largest size present in the
    dequeued batch.

    Args:
      capacity: An integer. The upper bound on the number of elements that
        may be stored in this queue.
      dtypes: A list of `DType` objects, one per component of a queue
        element.
      shapes: A list of `TensorShape` objects with the same length as
        `dtypes`. Any dimension set to `None` is dynamic and allows values
        to be enqueued with variable size in that dimension.
      names: (Optional.) A list of strings naming the components in the
        queue, with the same length as `dtypes`, or `None`. If given, the
        dequeue methods return a dictionary keyed by these names.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.

    Raises:
      ValueError: If shapes is not a list of shapes, or the lengths of
        dtypes and shapes do not match, or if names is specified and the
        lengths of dtypes and names do not match.
    """
    dtypes = _as_type_list(dtypes)
    shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)
    names = _as_name_list(names, dtypes)
    if len(dtypes) != len(shapes):
      raise ValueError("Shapes must be provided for all components, "
                       f"but received {len(dtypes)} dtypes and "
                       f"{len(shapes)} shapes.")
    queue_ref = gen_data_flow_ops.padding_fifo_queue_v2(
        component_types=dtypes,
        shapes=shapes,
        capacity=capacity,
        shared_name=_shared_name(shared_name),
        name=name)

    super().__init__(dtypes, shapes, names, queue_ref)
922
+
923
+
924
@tf_export("queue.PriorityQueue",
           v1=["queue.PriorityQueue", "io.PriorityQueue", "PriorityQueue"])
@deprecation.deprecated_endpoints(["io.PriorityQueue", "PriorityQueue"])
class PriorityQueue(QueueBase):
  """A queue implementation that dequeues elements in prioritized order.

  See `tf.queue.QueueBase` for a description of the methods on
  this class.
  """

  def __init__(self,
               capacity,
               types,
               shapes=None,
               names=None,
               shared_name=None,
               name="priority_queue"):
    """Creates a queue that dequeues elements in a prioritized order.

    A `PriorityQueue` has bounded capacity; supports multiple concurrent
    producers and consumers; and provides exactly-once delivery.

    A `PriorityQueue` holds a list of up to `capacity` elements. Each
    element is a fixed-length tuple of tensors whose dtypes are
    described by `types`, and whose shapes are optionally described
    by the `shapes` argument.

    If the `shapes` argument is specified, each component of a queue
    element must have the respective fixed shape. If it is
    unspecified, different queue elements may have different shapes,
    but the use of `dequeue_many` is disallowed.

    Enqueues and Dequeues to the `PriorityQueue` must include an additional
    tuple entry at the beginning: the `priority`. The priority must be
    an int64 scalar (for `enqueue`) or an int64 vector (for `enqueue_many`).

    Args:
      capacity: An integer. The upper bound on the number of elements
        that may be stored in this queue.
      types: A list of `DType` objects. The length of `types` must equal
        the number of tensors in each queue element, except the first priority
        element. The first tensor in each element is the priority,
        which must be type int64.
      shapes: (Optional.) A list of fully-defined `TensorShape` objects,
        with the same length as `types`, or `None`.
      names: (Optional.) A list of strings naming the components in the queue
        with the same length as `dtypes`, or `None`. If specified, the dequeue
        methods return a dictionary with the names as keys.
      shared_name: (Optional.) If non-empty, this queue will be shared under
        the given name across multiple sessions.
      name: Optional name for the queue operation.
    """
    types = _as_type_list(types)
    shapes = _as_shape_list(shapes, types)

    queue_ref = gen_data_flow_ops.priority_queue_v2(
        component_types=types,
        shapes=shapes,
        capacity=capacity,
        shared_name=_shared_name(shared_name),
        name=name)

    # Dequeued tuples are prefixed with the scalar int64 priority component,
    # so the dtypes/shapes handed to QueueBase must account for it too.
    priority_dtypes = [_dtypes.int64] + types
    priority_shapes = ([()] + shapes) if shapes else shapes

    # NOTE(review): unlike the other queue classes, `names` is passed through
    # without `_as_name_list` normalization -- presumably because the implicit
    # priority component has no name; confirm before changing.
    super(PriorityQueue, self).__init__(priority_dtypes, priority_shapes, names,
                                        queue_ref)
991
+
992
+
993
+ # TODO(josh11b): class BatchQueue(QueueBase):
994
+
995
+
996
class Barrier:
  """Represents a key-value map that persists across graph executions."""

  def __init__(self, types, shapes=None, shared_name=None, name="barrier"):
    """Creates a barrier that persists across different graph executions.

    A barrier represents a key-value map, where each key is a string, and
    each value is a tuple of tensors.

    At runtime, the barrier contains 'complete' and 'incomplete'
    elements. A complete element has defined tensors for all
    components of its value tuple, and may be accessed using
    take_many. An incomplete element has some undefined components in
    its value tuple, and may be updated using insert_many.

    The barrier call `take_many` outputs values in a particular order.
    First, it only outputs completed values. Second, the order in which
    completed values are returned matches the order in which their very
    first component was inserted into the barrier. So, for example, for this
    sequence of insertions and removals:

    barrier = Barrier((tf.string, tf.int32), shapes=((), ()))
    barrier.insert_many(0, keys=["k1", "k2"], values=["a", "b"]).run()
    barrier.insert_many(1, keys=["k1"], values=[1]).run()
    barrier.insert_many(0, keys=["k3"], values=["c"]).run()
    barrier.insert_many(1, keys=["k3"], values=[3]).run()
    barrier.insert_many(1, keys=["k2"], values=[2]).run()

    (indices, keys, values) = barrier.take_many(2)
    (indices_val, keys_val, values0_val, values1_val) =
      session.run([indices, keys, values[0], values[1]])

    The output will be (up to permutation of "k1" and "k2"):

    indices_val == (-2**63, -2**63)
    keys_val == ("k1", "k2")
    values0_val == ("a", "b")
    values1_val == (1, 2)

    Note the key "k2" was inserted into the barrier before "k3". Even though
    "k3" was completed first, both are complete by the time
    take_many is called. As a result, "k2" is prioritized and "k1" and "k2"
    are returned first. "k3" remains in the barrier until the next execution
    of `take_many`. Since "k1" and "k2" had their first insertions into
    the barrier together, their indices are the same (-2**63). The index
    of "k3" will be -2**63 + 1, because it was the next new inserted key.

    Args:
      types: A single dtype or a tuple of dtypes, corresponding to the
        dtypes of the tensor elements that comprise a value in this barrier.
      shapes: Optional. Constraints on the shapes of tensors in the values:
        a single tensor shape tuple; a tuple of tensor shape tuples
        for each barrier-element tuple component; or None if the shape should
        not be constrained.
      shared_name: Optional. If non-empty, this barrier will be shared under
        the given name across multiple sessions.
      name: Optional name for the barrier op.

    Raises:
      ValueError: If one of the `shapes` indicate no elements.
    """
    self._types = _as_type_list(types)

    if shapes is not None:
      shapes = _as_shape_list(shapes, self._types)
      self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
      for i, shape in enumerate(self._shapes):
        # Zero-element component shapes are rejected up front rather than
        # failing inside the barrier kernel.
        if shape.num_elements() == 0:
          raise ValueError("Empty tensors are not supported, but received "
                           f"shape '{shape}' at index {i}")
    else:
      # No constraints supplied: every component's shape is left unknown.
      self._shapes = [tensor_shape.unknown_shape() for _ in self._types]

    self._barrier_ref = gen_data_flow_ops.barrier(
        component_types=self._types,
        shapes=self._shapes,
        shared_name=shared_name,
        name=name)
    if context.executing_eagerly():
      self._name = context.context().scope_name
    else:
      # Cache the op's final name component; it prefixes the names of the
      # dependent ops created by the methods below.
      self._name = self._barrier_ref.op.name.split("/")[-1]

  @property
  def barrier_ref(self):
    """Get the underlying barrier reference."""
    return self._barrier_ref

  @property
  def name(self):
    """The name of the underlying barrier."""
    if context.executing_eagerly():
      return self._name
    return self._barrier_ref.op.name

  def insert_many(self, component_index, keys, values, name=None):
    """For each key, assigns the respective value to the specified component.

    This operation updates each element at component_index.

    Args:
      component_index: The component of the value that is being assigned.
      keys: A vector of keys, with length n.
      values: An any-dimensional tensor of values, which are associated with the
        respective keys. The first dimension must have length n.
      name: Optional name for the op.

    Returns:
      The operation that performs the insertion.
    Raises:
      InvalidArgumentsError: If inserting keys and values without elements.
    """
    if name is None:
      name = "%s_BarrierInsertMany" % self._name
    return gen_data_flow_ops.barrier_insert_many(
        self._barrier_ref, keys, values, component_index, name=name)

  def take_many(self,
                num_elements,
                allow_small_batch=False,
                timeout=None,
                name=None):
    """Takes the given number of completed elements from this barrier.

    This operation concatenates completed-element component tensors along
    the 0th dimension to make a single component tensor.

    If barrier has no completed elements, this operation will block
    until there are 'num_elements' elements to take.

    TODO(b/25743580): the semantics of `allow_small_batch` are experimental
    and may be extended to other cases in the future.

    TODO(ebrevdo): If a take_many(allow_small_batch=True) is blocking
    already when the barrier is closed, it will block for ever. Fix this
    by using asynchronous operations.

    Args:
      num_elements: The number of elements to take.
      allow_small_batch: If the barrier is closed, don't block if there are less
        completed elements than requested, but instead return all available
        completed elements.
      timeout: This specifies the number of milliseconds to block
        before returning with DEADLINE_EXCEEDED. (This option is not
        supported yet.)
      name: A name for the operation (optional).

    Returns:
      A tuple of (index, key, value_list).
      "index" is a int64 tensor of length num_elements containing the
      index of the insert_many call for which the very first component of
      the given element was inserted into the Barrier, starting with
      the value -2**63. Note, this value is different from the
      index of the insert_many call for which the element was completed.
      "key" is a string tensor of length num_elements containing the keys.
      "value_list" is a tuple of tensors, each one with size num_elements
      in the 0th dimension for each component in the barrier's values.

    """
    if name is None:
      name = "%s_BarrierTakeMany" % self._name
    ret = gen_data_flow_ops.barrier_take_many(
        self._barrier_ref,
        num_elements,
        self._types,
        allow_small_batch,
        timeout,
        name=name)

    # NOTE(mrry): Not using a shape function because we need access to
    # the Barrier object.
    if not context.executing_eagerly():
      op = ret[0].op
      if allow_small_batch:
        # The returned batch may be smaller than requested, so the batch
        # dimension is statically unknown.
        batch_dim = None
      else:
        # The batch dimension is fixed by num_elements (op input 1) when it
        # is a compile-time constant; otherwise constant_value returns None.
        batch_dim = tensor_shape.Dimension(
            tensor_util.constant_value(op.inputs[1]))
      op.outputs[0].set_shape(tensor_shape.TensorShape([batch_dim]))  # indices
      op.outputs[1].set_shape(tensor_shape.TensorShape([batch_dim]))  # keys
      for output, shape in zip(op.outputs[2:], self._shapes):  # value_list
        output.set_shape(
            tensor_shape.TensorShape([batch_dim]).concatenate(shape))

    return ret

  def close(self, cancel_pending_enqueues=False, name=None):
    """Closes this barrier.

    This operation signals that no more new key values will be inserted in the
    given barrier. Subsequent InsertMany operations with new keys will fail.
    InsertMany operations that just complement already existing keys with other
    components, will continue to succeed. Subsequent TakeMany operations will
    continue to succeed if sufficient elements remain in the barrier. Subsequent
    TakeMany operations that would block will fail immediately.

    If `cancel_pending_enqueues` is `True`, all pending requests to the
    underlying queue will also be canceled, and completing of already
    started values is also not acceptable anymore.

    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False` (described above).
      name: Optional name for the op.

    Returns:
      The operation that closes the barrier.
    """
    if name is None:
      name = "%s_BarrierClose" % self._name
    return gen_data_flow_ops.barrier_close(
        self._barrier_ref,
        cancel_pending_enqueues=cancel_pending_enqueues,
        name=name)

  def ready_size(self, name=None):
    """Compute the number of complete elements in the given barrier.

    Args:
      name: A name for the operation (optional).

    Returns:
      A single-element tensor containing the number of complete elements in the
      given barrier.
    """
    if name is None:
      name = "%s_BarrierReadySize" % self._name
    return gen_data_flow_ops.barrier_ready_size(self._barrier_ref, name=name)

  def incomplete_size(self, name=None):
    """Compute the number of incomplete elements in the given barrier.

    Args:
      name: A name for the operation (optional).

    Returns:
      A single-element tensor containing the number of incomplete elements in
      the given barrier.
    """
    if name is None:
      name = "%s_BarrierIncompleteSize" % self._name
    return gen_data_flow_ops.barrier_incomplete_size(
        self._barrier_ref, name=name)
1239
+
1240
+
1241
@tf_export(v1=["ConditionalAccumulatorBase"])
class ConditionalAccumulatorBase:
  """A conditional accumulator for aggregating gradients.

  A gradient is only added to the accumulator when it is up to date, i.e.
  when the time step at which it was computed equals the accumulator's own
  time step.

  Reading the averaged gradient blocks until the required number of
  gradients has been aggregated.
  """

  def __init__(self, dtype, shape, accumulator_ref):
    """Creates a new ConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients, or `None` if unknown.
      accumulator_ref: A handle to the conditional accumulator, created by
        subclasses.
    """
    self._dtype = dtype
    self._shape = (tensor_shape.TensorShape(shape)
                   if shape is not None else tensor_shape.unknown_shape())
    self._accumulator_ref = accumulator_ref
    if context.executing_eagerly():
      self._name = context.context().scope_name
    else:
      self._name = self._accumulator_ref.op.name.split("/")[-1]

  @property
  def accumulator_ref(self):
    """The underlying accumulator reference."""
    return self._accumulator_ref

  @property
  def name(self):
    """The name of the underlying accumulator."""
    return self._name

  @property
  def dtype(self):
    """The datatype of the gradients accumulated by this accumulator."""
    return self._dtype

  def num_accumulated(self, name=None):
    """Number of gradients that have currently been aggregated in accumulator.

    Args:
      name: Optional name for the operation.

    Returns:
      Number of accumulated gradients currently in accumulator.
    """
    if name is None:
      name = "%s_NumAccumulated" % self._name

    return gen_data_flow_ops.resource_accumulator_num_accumulated(
        self._accumulator_ref, name=name)

  def set_global_step(self, new_global_step, name=None):
    """Sets the global time step of the accumulator.

    The operation logs a warning if we attempt to set to a time step that is
    lower than the accumulator's own time step.

    Args:
      new_global_step: Value of new time step. Can be a variable or a constant
      name: Optional name for the operation.

    Returns:
      Operation that sets the accumulator's time step.
    """
    new_step_t = math_ops.cast(
        ops.convert_to_tensor(new_global_step), _dtypes.int64)
    return gen_data_flow_ops.resource_accumulator_set_global_step(
        self._accumulator_ref, new_step_t, name=name)
1319
+
1320
+
1321
@tf_export(v1=["ConditionalAccumulator"])
class ConditionalAccumulator(ConditionalAccumulatorBase):
  """A conditional accumulator for aggregating gradients.

  Up-to-date gradients (i.e., time step at which gradient was computed is
  equal to the accumulator's time step) are added to the accumulator.

  Extraction of the average gradient is blocked until the required number of
  gradients has been accumulated.
  """

  def __init__(self,
               dtype,
               shape=None,
               shared_name=None,
               name="conditional_accumulator",
               reduction_type="MEAN"):
    """Creates a new ConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients.
      shared_name: Optional. If non-empty, this accumulator will be shared under
        the given name across multiple sessions.
      name: Optional name for the accumulator.
      reduction_type: Reduction type to use when taking the gradient.
    """
    accumulator_ref = gen_data_flow_ops.resource_conditional_accumulator(
        dtype=dtype,
        shape=shape,
        shared_name=shared_name,
        name=name,
        reduction_type=reduction_type)
    if context.executing_eagerly():
      # In eager mode there is no graph to own the resource, so its lifetime
      # is tied to this Python object via an explicit deleter (created before
      # the base-class init so the handle is tracked as early as possible).
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=accumulator_ref, handle_device=context.context().device_name)

    super(ConditionalAccumulator, self).__init__(dtype, shape, accumulator_ref)

  def apply_grad(self, grad, local_step=0, name=None):
    """Attempts to apply a gradient to the accumulator.

    The attempt is silently dropped if the gradient is stale, i.e., local_step
    is less than the accumulator's global time step.

    Args:
      grad: The gradient tensor to be applied.
      local_step: Time step at which the gradient was computed.
      name: Optional name for the operation.

    Returns:
      The operation that (conditionally) applies a gradient to the accumulator.

    Raises:
      ValueError: If grad is of the wrong shape
    """
    grad = ops.convert_to_tensor(grad, self._dtype)
    # Static shape validation; raises if grad is incompatible with the shape
    # fixed at construction time.
    grad.get_shape().assert_is_compatible_with(self._shape)
    local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)

    return gen_data_flow_ops.resource_accumulator_apply_gradient(
        self._accumulator_ref, local_step=local_step, gradient=grad, name=name)

  def take_grad(self, num_required, name=None):
    """Attempts to extract the average gradient from the accumulator.

    The operation blocks until sufficient number of gradients have been
    successfully applied to the accumulator.

    Once successful, the following actions are also triggered:

    - Counter of accumulated gradients is reset to 0.
    - Aggregated gradient is reset to 0 tensor.
    - Accumulator's internal time step is incremented by 1.

    Args:
      num_required: Number of gradients that needs to have been aggregated
      name: Optional name for the operation

    Returns:
      A tensor holding the value of the average gradient.

    Raises:
      InvalidArgumentError: If num_required < 1
    """
    out = gen_data_flow_ops.resource_accumulator_take_gradient(
        self._accumulator_ref, num_required, dtype=self._dtype, name=name)
    # Propagate the statically known accumulator shape onto the result.
    out.set_shape(self._shape)
    return out
1410
+
1411
+
1412
+ @tf_export(
1413
+ v1=["sparse.SparseConditionalAccumulator", "SparseConditionalAccumulator"])
1414
+ class SparseConditionalAccumulator(ConditionalAccumulatorBase):
1415
+ """A conditional accumulator for aggregating sparse gradients.
1416
+
1417
+ Sparse gradients are represented by `IndexedSlices`.
1418
+
1419
+ Up-to-date gradients (i.e., time step at which gradient was computed is
1420
+ equal to the accumulator's time step) are added to the accumulator.
1421
+
1422
+ Extraction of the average gradient is blocked until the required number of
1423
+ gradients has been accumulated.
1424
+
1425
+ Args:
1426
+ dtype: Datatype of the accumulated gradients.
1427
+ shape: Shape of the accumulated gradients.
1428
+ shared_name: Optional. If non-empty, this accumulator will be shared under
1429
+ the given name across multiple sessions.
1430
+ name: Optional name for the accumulator.
1431
+ reduction_type: Reduction type to use when taking the gradient.
1432
+ """
1433
+
1434
  def __init__(self,
               dtype,
               shape=None,
               shared_name=None,
               name="sparse_conditional_accumulator",
               reduction_type="MEAN"):
    """Creates a new SparseConditionalAccumulator.

    Args:
      dtype: Datatype of the accumulated gradients.
      shape: Shape of the accumulated gradients.
      shared_name: Optional. If non-empty, this accumulator will be shared
        under the given name across multiple sessions.
      name: Optional name for the accumulator.
      reduction_type: Reduction type to use when taking the gradient.
    """
    accumulator_ref = gen_data_flow_ops.sparse_conditional_accumulator(
        dtype=dtype,
        shape=shape,
        shared_name=shared_name,
        name=name,
        reduction_type=reduction_type)
    super(SparseConditionalAccumulator, self).__init__(dtype, shape,
                                                       accumulator_ref)
1448
+
1449
+ def apply_indexed_slices_grad(self, grad, local_step=0, name=None):
1450
+ """Attempts to apply a gradient to the accumulator.
1451
+
1452
+ The attempt is silently dropped if the gradient is stale, i.e., `local_step`
1453
+ is less than the accumulator's global time step.
1454
+
1455
+ Args:
1456
+ grad: The gradient `IndexedSlices` to be applied.
1457
+ local_step: Time step at which the gradient was computed.
1458
+ name: Optional name for the operation.
1459
+
1460
+ Returns:
1461
+ The operation that (conditionally) applies a gradient to the accumulator.
1462
+
1463
+ Raises:
1464
+ InvalidArgumentError: If grad is of the wrong shape
1465
+ """
1466
+ return self.apply_grad(
1467
+ grad_indices=grad.indices,
1468
+ grad_values=grad.values,
1469
+ grad_shape=grad.dense_shape,
1470
+ local_step=local_step,
1471
+ name=name)
1472
+
1473
+ def apply_grad(self,
1474
+ grad_indices,
1475
+ grad_values,
1476
+ grad_shape=None,
1477
+ local_step=0,
1478
+ name=None):
1479
+ """Attempts to apply a sparse gradient to the accumulator.
1480
+
1481
+ The attempt is silently dropped if the gradient is stale, i.e., `local_step`
1482
+ is less than the accumulator's global time step.
1483
+
1484
+ A sparse gradient is represented by its indices, values and possibly empty
1485
+ or None shape. Indices must be a vector representing the locations of
1486
+ non-zero entries in the tensor. Values are the non-zero slices of the
1487
+ gradient, and must have the same first dimension as indices, i.e., the nnz
1488
+ represented by indices and values must be consistent. Shape, if not empty or
1489
+ None, must be consistent with the accumulator's shape (if also provided).
1490
+
1491
+ Example:
1492
+ A tensor [[0, 0], [0, 1], [2, 3]] can be represented
1493
+ indices: [1,2]
1494
+ values: [[0,1],[2,3]]
1495
+ shape: [3, 2]
1496
+
1497
+ Args:
1498
+ grad_indices: Indices of the sparse gradient to be applied.
1499
+ grad_values: Values of the sparse gradient to be applied.
1500
+ grad_shape: Shape of the sparse gradient to be applied.
1501
+ local_step: Time step at which the gradient was computed.
1502
+ name: Optional name for the operation.
1503
+
1504
+ Returns:
1505
+ The operation that (conditionally) applies a gradient to the accumulator.
1506
+
1507
+ Raises:
1508
+ InvalidArgumentError: If grad is of the wrong shape
1509
+ """
1510
+ local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)
1511
+ return gen_data_flow_ops.sparse_accumulator_apply_gradient(
1512
+ self._accumulator_ref,
1513
+ local_step=local_step,
1514
+ gradient_indices=math_ops.cast(grad_indices, _dtypes.int64),
1515
+ gradient_values=grad_values,
1516
+ gradient_shape=math_ops.cast(
1517
+ [] if grad_shape is None else grad_shape, _dtypes.int64),
1518
+ has_known_shape=(grad_shape is not None),
1519
+ name=name)
1520
+
1521
+ def take_grad(self, num_required, name=None):
1522
+ """Attempts to extract the average gradient from the accumulator.
1523
+
1524
+ The operation blocks until sufficient number of gradients have been
1525
+ successfully applied to the accumulator.
1526
+
1527
+ Once successful, the following actions are also triggered:
1528
+ - Counter of accumulated gradients is reset to 0.
1529
+ - Aggregated gradient is reset to 0 tensor.
1530
+ - Accumulator's internal time step is incremented by 1.
1531
+
1532
+ Args:
1533
+ num_required: Number of gradients that needs to have been aggregated
1534
+ name: Optional name for the operation
1535
+
1536
+ Returns:
1537
+ A tuple of indices, values, and shape representing the average gradient.
1538
+
1539
+ Raises:
1540
+ InvalidArgumentError: If `num_required` < 1
1541
+ """
1542
+ return gen_data_flow_ops.sparse_accumulator_take_gradient(
1543
+ self._accumulator_ref, num_required, dtype=self._dtype, name=name)
1544
+
1545
+ def take_indexed_slices_grad(self, num_required, name=None):
1546
+ """Attempts to extract the average gradient from the accumulator.
1547
+
1548
+ The operation blocks until sufficient number of gradients have been
1549
+ successfully applied to the accumulator.
1550
+
1551
+ Once successful, the following actions are also triggered:
1552
+ - Counter of accumulated gradients is reset to 0.
1553
+ - Aggregated gradient is reset to 0 tensor.
1554
+ - Accumulator's internal time step is incremented by 1.
1555
+
1556
+ Args:
1557
+ num_required: Number of gradients that needs to have been aggregated
1558
+ name: Optional name for the operation
1559
+
1560
+ Returns:
1561
+ An `IndexedSlices` holding the value of the average gradient.
1562
+
1563
+ Raises:
1564
+ InvalidArgumentError: If `num_required` < 1
1565
+ """
1566
+ return_val = gen_data_flow_ops.sparse_accumulator_take_gradient(
1567
+ self._accumulator_ref, num_required, dtype=self._dtype, name=name)
1568
+ return indexed_slices.IndexedSlices(
1569
+ indices=return_val.indices,
1570
+ values=return_val.values,
1571
+ dense_shape=return_val.shape)
1572
+
1573
+ # SparseConditionalAccumulator is not switched to resource. Use old kernels.
1574
+ def num_accumulated(self, name=None):
1575
+ """Number of gradients that have currently been aggregated in accumulator.
1576
+
1577
+ Args:
1578
+ name: Optional name for the operation.
1579
+
1580
+ Returns:
1581
+ Number of accumulated gradients currently in accumulator.
1582
+ """
1583
+ if name is None:
1584
+ name = "%s_NumAccumulated" % self._name
1585
+
1586
+ return gen_data_flow_ops.accumulator_num_accumulated(
1587
+ self._accumulator_ref, name=name)
1588
+
1589
+ def set_global_step(self, new_global_step, name=None):
1590
+ """Sets the global time step of the accumulator.
1591
+
1592
+ The operation logs a warning if we attempt to set to a time step that is
1593
+ lower than the accumulator's own time step.
1594
+
1595
+ Args:
1596
+ new_global_step: Value of new time step. Can be a variable or a constant
1597
+ name: Optional name for the operation.
1598
+
1599
+ Returns:
1600
+ Operation that sets the accumulator's time step.
1601
+ """
1602
+ return gen_data_flow_ops.accumulator_set_global_step(
1603
+ self._accumulator_ref,
1604
+ math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64),
1605
+ name=name)
1606
+
1607
+
1608
class BaseStagingArea:
  """Base class for Staging Areas."""
  # Class-level bookkeeping shared across staging area instances.
  _identifier = 0
  _lock = threading.Lock()

  def __init__(self,
               dtypes,
               shapes=None,
               names=None,
               shared_name=None,
               capacity=0,
               memory_limit=0):
    """Constructs the state common to all staging areas.

    Args:
      dtypes: A list of types, one per component of a staging area element.
      shapes: (Optional.) A list of shapes (same length as `dtypes`), or
        None for unconstrained shapes.
      names: (Optional.) A list of names (same length as `dtypes`), or None.
        When provided, `put()`/`get()` work with dictionaries keyed on them.
      shared_name: (Optional.) A string name under which the underlying
        staging area is shared between python objects; if None a unique
        graph name is generated.
      capacity: (Optional.) Maximum number of elements; 0 means unbounded.
      memory_limit: (Optional.) Maximum number of bytes; 0 means unbounded.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if shared_name is None:
      self._name = (
          ops.get_default_graph().unique_name(self.__class__.__name__))
    elif isinstance(shared_name, str):
      self._name = shared_name
    else:
      raise ValueError(f"shared_name must be a string, got {shared_name}")

    self._dtypes = dtypes

    if shapes is not None:
      if len(shapes) != len(dtypes):
        raise ValueError("StagingArea shapes must be the same length as dtypes")
      self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
    else:
      self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]

    if names is not None:
      if len(names) != len(dtypes):
        raise ValueError("StagingArea names must be the same length as dtypes")
      self._names = names
    else:
      self._names = None

    self._capacity = capacity
    self._memory_limit = memory_limit

    # all get and put ops must colocate with this op
    with ops.name_scope("%s_root" % self._name):
      self._coloc_op = control_flow_ops.no_op()

  @property
  def name(self):
    """The name of the staging area."""
    return self._name

  @property
  def dtypes(self):
    """The list of dtypes for each component of a staging area element."""
    return self._dtypes

  @property
  def shapes(self):
    """The list of shapes for each component of a staging area element."""
    return self._shapes

  @property
  def names(self):
    """The list of names for each component of a staging area element."""
    return self._names

  @property
  def capacity(self):
    """The maximum number of elements of this staging area."""
    return self._capacity

  @property
  def memory_limit(self):
    """The maximum number of bytes of this staging area."""
    return self._memory_limit

  def _check_put_dtypes(self, vals, indices=None):
    """Validate and convert `vals` to a list of `Tensor`s.

    The `vals` argument can be a Tensor, a list or tuple of tensors, or a
    dictionary with tensor values.

    If `vals` is a list, then the appropriate indices associated with the
    values must be provided.

    If it is a dictionary, the staging area must have been constructed with
    a `names` attribute and the dictionary keys must match the staging area
    names. `indices` will be inferred from the dictionary keys.
    If the staging area was constructed with a `names` attribute, `vals`
    must be a dictionary.

    Checks that the dtype and shape of each value matches that
    of the staging area.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary.
      indices: (Optional.) A list of component indices matching `vals` when
        `vals` is a list or tuple; ignored (inferred from keys) when `vals`
        is a dictionary.

    Returns:
      A (tensors, indices) tuple where `tensors` is a list of `Tensor`
      objects and `indices` is a list of indices associated with the
      tensors.

    Raises:
      ValueError: If `vals` or `indices` is invalid.
    """
    if isinstance(vals, dict):
      if not self._names:
        raise ValueError(
            "Staging areas must have names to enqueue a dictionary")
      if not set(vals.keys()).issubset(self._names):
        # BUGFIX: previously the two f-strings ran together with no
        # separator, producing "... Dictionary: [...]Queue: [...]".
        raise ValueError("Keys in dictionary to put do not match names "
                         f"of staging area. Dictionary: {sorted(vals.keys())} "
                         f"Queue: {sorted(self._names)}")
      # The order of values in `self._names` indicates the order in which
      # the tensors in the dictionary `vals` must be listed.
      vals, indices, _ = zip(*[(vals[k], i, k)
                               for i, k in enumerate(self._names)
                               if k in vals])
    else:
      if self._names:
        raise ValueError("You must enqueue a dictionary in a staging area "
                         "with names")

      if indices is None:
        raise ValueError("Indices must be supplied when inserting a list "
                         "of tensors")

      if len(indices) != len(vals):
        raise ValueError(f"Number of indices {len(indices)} doesn't match "
                         f"number of values {len(vals)}")

    if not isinstance(vals, (list, tuple)):
      vals = [vals]
      indices = [0]

    # Sanity check number of values
    if len(vals) > len(self._dtypes):
      raise ValueError(f"Unexpected number of inputs {len(vals)} vs "
                       f"{len(self._dtypes)}")

    tensors = []

    for val, i in zip(vals, indices):
      dtype, shape = self._dtypes[i], self._shapes[i]
      # Check dtype
      if val.dtype != dtype:
        raise ValueError(f"Datatypes do not match. "
                         f"Received val.dtype {str(val.dtype)} and "
                         f"dtype {str(dtype)}")
      # Check shape
      val.get_shape().assert_is_compatible_with(shape)

      tensors.append(
          ops.convert_to_tensor(val, dtype=dtype, name="component_%d" % i))

    return tensors, indices

  def _create_device_transfers(self, tensors):
    """Encode inter-device transfers if the current device
    is not the same as the Staging Area's device.
    """

    if not isinstance(tensors, (tuple, list)):
      tensors = [tensors]

    # Compare the current device scope against the staging area's via a
    # throwaway no-op; insert identities to force the transfer if they
    # differ.
    curr_device_scope = control_flow_ops.no_op().device

    if curr_device_scope != self._coloc_op.device:
      tensors = [array_ops.identity(t) for t in tensors]

    return tensors

  def _get_return_value(self, tensors, indices):
    """Return the value to return from a get op.

    If the staging area has names, return a dictionary with the
    names as keys. Otherwise return either a single tensor
    or a list of tensors depending on the length of `tensors`.

    Args:
      tensors: List of tensors from the get op.
      indices: Indices of associated names and shapes

    Returns:
      A single tensor, a list of tensors, or a dictionary
      of tensors.
    """

    tensors = self._create_device_transfers(tensors)

    # Sets shape
    for output, i in zip(tensors, indices):
      output.set_shape(self._shapes[i])

    if self._names:
      # The returned values in `tensors` are in the same order as
      # the names in `self._names`.
      return {self._names[i]: t for t, i in zip(tensors, indices)}
    return tensors

  def _scope_vals(self, vals):
    """Return a list of values to pass to `name_scope()`.

    Args:
      vals: A tensor, a list or tuple of tensors, or a dictionary.

    Returns:
      The values in vals as a list.
    """
    if isinstance(vals, (list, tuple)):
      return vals
    elif isinstance(vals, dict):
      return vals.values()
    else:
      return [vals]
1819
+
1820
+
1821
class StagingArea(BaseStagingArea):
  """Class for staging inputs. No ordering guarantees.

  A `StagingArea` is a TensorFlow data structure that stores tensors across
  multiple steps, and exposes operations that can put and get tensors.

  Each `StagingArea` element is a tuple of one or more tensors, where each
  tuple component has a static dtype, and may have a static shape.

  The capacity of a `StagingArea` may be bounded or unbounded.
  It supports multiple concurrent producers and consumers; and
  provides exactly-once delivery.

  Each element of a `StagingArea` is a fixed-length tuple of tensors whose
  dtypes are described by `dtypes`, and whose shapes are optionally described
  by the `shapes` argument.

  If the `shapes` argument is specified, each component of a staging area
  element must have the respective fixed shape. If it is
  unspecified, different elements may have different shapes,

  It can be configured with a capacity in which case
  put(values) will block until space becomes available.

  Similarly, it can be configured with a memory limit which
  will block put(values) until space is available.
  This is mostly useful for limiting the number of tensors on
  devices such as GPUs.

  All get() and peek() commands block if the requested data
  is not present in the Staging Area.

  """

  def __init__(self,
               dtypes,
               shapes=None,
               names=None,
               shared_name=None,
               capacity=0,
               memory_limit=0):
    """Constructs a staging area object.

    The two optional lists, `shapes` and `names`, must be of the same length
    as `dtypes` if provided. The values at a given index `i` indicate the
    shape and name to use for the corresponding queue component in `dtypes`.

    The device scope at the time of object creation determines where the
    storage for the `StagingArea` will reside. Calls to `put` will incur a copy
    to this memory space, if necessary. Tensors returned by `get` will be
    placed according to the device scope when `get` is called.

    Args:
      dtypes: A list of types. The length of dtypes must equal the number
        of tensors in each element.
      shapes: (Optional.) Constraints on the shapes of tensors in an element.
        A list of shape tuples or None. This list is the same length
        as dtypes. If the shape of any tensors in the element are constrained,
        all must be; shapes can be None if the shapes should not be constrained.
      names: (Optional.) If provided, the `get()` and
        `put()` methods will use dictionaries with these names as keys.
        Must be None or a list or tuple of the same length as `dtypes`.
      shared_name: (Optional.) A name to be used for the shared object. By
        passing the same name to two different python objects they will share
        the underlying staging area. Must be a string.
      capacity: (Optional.) Maximum number of elements.
        An integer. If zero, the Staging Area is unbounded
      memory_limit: (Optional.) Maximum number of bytes of all tensors
        in the Staging Area.
        An integer. If zero, the Staging Area is unbounded

    Raises:
      ValueError: If one of the arguments is invalid.
    """

    super(StagingArea, self).__init__(dtypes, shapes, names, shared_name,
                                      capacity, memory_limit)

  def put(self, values, name=None):
    """Create an op that places a value into the staging area.

    This operation will block if the `StagingArea` has reached
    its capacity.

    Args:
      values: A single tensor, a list or tuple of tensors, or a dictionary with
        tensor values. The number of elements must match the length of the
        list provided to the dtypes argument when creating the StagingArea.
      name: A name for the operation (optional).

    Returns:
      The created op.

    Raises:
      ValueError: If the number or type of inputs don't match the staging area.
    """
    with ops.name_scope(name, "%s_put" % self._name,
                        self._scope_vals(values)) as scope:

      if not isinstance(values, (list, tuple, dict)):
        values = [values]

      # Hard-code indices for this staging area
      # (unlike MapStagingArea, partial inserts are not supported here,
      # so every component is always present, in declaration order).
      indices = list(range(len(values)))
      vals, _ = self._check_put_dtypes(values, indices)

      # The stage op must live on the staging area's device; colocating
      # with the root no-op created in the base class enforces that.
      with ops.colocate_with(self._coloc_op):
        op = gen_data_flow_ops.stage(
            values=vals,
            shared_name=self._name,
            name=scope,
            capacity=self._capacity,
            memory_limit=self._memory_limit)

      return op

  def __internal_get(self, get_fn, name):
    # Shared plumbing for get()/peek(): run the raw op colocated with the
    # staging area, then wrap the outputs (dict/list/tensor) for callers.
    with ops.colocate_with(self._coloc_op):
      ret = get_fn()

    indices = list(range(len(self._dtypes)))  # Hard coded
    return self._get_return_value(ret, indices)

  def get(self, name=None):
    """Gets one element from this staging area.

    If the staging area is empty when this operation executes, it will block
    until there is an element to dequeue.

    Note that unlike others ops that can block, like the queue Dequeue
    operations, this can stop other work from happening. To avoid this, the
    intended use is for this to be called only when there will be an element
    already available. One method for doing this in a training loop would be to
    run a `put()` call during a warmup session.run call, and then call both
    `get()` and `put()` in each subsequent step.

    The placement of the returned tensor will be determined by the current
    device scope when this function is called.

    Args:
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_get" % self._name

    # pylint: disable=bad-continuation
    fn = lambda: gen_data_flow_ops.unstage(dtypes=self._dtypes,
                    shared_name=self._name, name=name,
                    capacity=self._capacity,
                    memory_limit=self._memory_limit)
    # pylint: enable=bad-continuation

    return self.__internal_get(fn, name)

  def peek(self, index, name=None):
    """Peeks at an element in the staging area.

    If the staging area is too small to contain the element at
    the specified index, it will block until enough elements
    are inserted to complete the operation.

    The placement of the returned tensor will be determined by
    the current device scope when this function is called.

    Args:
      index: The index of the tensor within the staging area
        to look up.
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_peek" % self._name

    # pylint: disable=bad-continuation
    fn = lambda: gen_data_flow_ops.stage_peek(index,
                    dtypes=self._dtypes, shared_name=self._name,
                    name=name, capacity=self._capacity,
                    memory_limit=self._memory_limit)
    # pylint: enable=bad-continuation

    return self.__internal_get(fn, name)

  def size(self, name=None):
    """Returns the number of elements in the staging area.

    Args:
      name: A name for the operation (optional)

    Returns:
      The created op
    """
    if name is None:
      name = "%s_size" % self._name

    return gen_data_flow_ops.stage_size(
        name=name,
        shared_name=self._name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)

  def clear(self, name=None):
    """Clears the staging area.

    Args:
      name: A name for the operation (optional)

    Returns:
      The created op
    """
    if name is None:
      name = "%s_clear" % self._name

    return gen_data_flow_ops.stage_clear(
        name=name,
        shared_name=self._name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
2045
+
2046
+
2047
+ class MapStagingArea(BaseStagingArea):
2048
+ """A `MapStagingArea` is a TensorFlow data structure that stores tensors
2049
+ across multiple steps, and exposes operations that can put and get tensors.
2050
+
2051
+ Each `MapStagingArea` element is a (key, value) pair.
2052
+ Only int64 keys are supported, other types should be
2053
+ hashed to produce a key.
2054
+ Values are a tuple of one or more tensors.
2055
+ Each tuple component has a static dtype,
2056
+ and may have a static shape.
2057
+
2058
+ The capacity of a `MapStagingArea` may be bounded or unbounded.
2059
+ It supports multiple concurrent producers and consumers; and
2060
+ provides exactly-once delivery.
2061
+
2062
+ Each value tuple of a `MapStagingArea` is a fixed-length tuple of tensors
2063
+ whose
2064
+ dtypes are described by `dtypes`, and whose shapes are optionally described
2065
+ by the `shapes` argument.
2066
+
2067
+ If the `shapes` argument is specified, each component of a staging area
2068
+ element must have the respective fixed shape. If it is
2069
+ unspecified, different elements may have different shapes,
2070
+
2071
+ It behaves like an associative container with support for:
2072
+
2073
+ - put(key, values)
2074
+ - peek(key) like dict.get(key)
2075
+ - get(key) like dict.pop(key)
2076
+ - get(key=None) like dict.popitem()
2077
+ - size()
2078
+ - clear()
2079
+
2080
+ If ordered a tree structure ordered by key will be used and
2081
+ get(key=None) will remove (key, value) pairs in increasing key order.
2082
+ Otherwise a hashtable
2083
+
2084
+ It can be configured with a capacity in which case
2085
+ put(key, values) will block until space becomes available.
2086
+
2087
+ Similarly, it can be configured with a memory limit which
2088
+ will block put(key, values) until space is available.
2089
+ This is mostly useful for limiting the number of tensors on
2090
+ devices such as GPUs.
2091
+
2092
+ All get() and peek() commands block if the requested
2093
+ (key, value) pair is not present in the staging area.
2094
+
2095
+ Partial puts are supported and will be placed in an incomplete
2096
+ map until such time as all values associated with the key have
2097
+ been inserted. Once completed, this (key, value) pair will be
2098
+ inserted into the map. Data in the incomplete map
2099
+ counts towards the memory limit, but not towards capacity limit.
2100
+
2101
+ Partial gets from the map are also supported.
2102
+ This removes the partially requested tensors from the entry,
2103
+ but the entry is only removed from the map once all tensors
2104
+ associated with it are removed.
2105
+ """
2106
+
2107
+ def __init__(self,
2108
+ dtypes,
2109
+ shapes=None,
2110
+ names=None,
2111
+ shared_name=None,
2112
+ ordered=False,
2113
+ capacity=0,
2114
+ memory_limit=0):
2115
+ """Args:
2116
+
2117
+ dtypes: A list of types. The length of dtypes must equal the number
2118
+ of tensors in each element.
2119
+ capacity: (Optional.) Maximum number of elements.
2120
+ An integer. If zero, the Staging Area is unbounded
2121
+ memory_limit: (Optional.) Maximum number of bytes of all tensors
2122
+ in the Staging Area (excluding keys).
2123
+ An integer. If zero, the Staging Area is unbounded
2124
+ ordered: (Optional.) If True the underlying data structure
2125
+ is a tree ordered on key. Otherwise assume a hashtable.
2126
+ shapes: (Optional.) Constraints on the shapes of tensors in an element.
2127
+ A list of shape tuples or None. This list is the same length
2128
+ as dtypes. If the shape of any tensors in the element are constrained,
2129
+ all must be; shapes can be None if the shapes should not be constrained.
2130
+ names: (Optional.) If provided, the `get()` and
2131
+ `put()` methods will use dictionaries with these names as keys.
2132
+ Must be None or a list or tuple of the same length as `dtypes`.
2133
+ shared_name: (Optional.) A name to be used for the shared object. By
2134
+ passing the same name to two different python objects they will share
2135
+ the underlying staging area. Must be a string.
2136
+
2137
+ Raises:
2138
+ ValueError: If one of the arguments is invalid.
2139
+
2140
+ """
2141
+
2142
+ super(MapStagingArea, self).__init__(dtypes, shapes, names, shared_name,
2143
+ capacity, memory_limit)
2144
+
2145
+ # Defer to different methods depending if the map is ordered
2146
+ self._ordered = ordered
2147
+
2148
+ if ordered:
2149
+ self._put_fn = gen_data_flow_ops.ordered_map_stage
2150
+ self._pop_fn = gen_data_flow_ops.ordered_map_unstage
2151
+ self._popitem_fn = gen_data_flow_ops.ordered_map_unstage_no_key
2152
+ self._peek_fn = gen_data_flow_ops.ordered_map_peek
2153
+ self._size_fn = gen_data_flow_ops.ordered_map_size
2154
+ self._incomplete_size_fn = gen_data_flow_ops.ordered_map_incomplete_size
2155
+ self._clear_fn = gen_data_flow_ops.ordered_map_clear
2156
+ else:
2157
+ self._put_fn = gen_data_flow_ops.map_stage
2158
+ self._pop_fn = gen_data_flow_ops.map_unstage
2159
+ self._popitem_fn = gen_data_flow_ops.map_unstage_no_key
2160
+ self._peek_fn = gen_data_flow_ops.map_peek
2161
+ self._size_fn = gen_data_flow_ops.map_size
2162
+ self._incomplete_size_fn = gen_data_flow_ops.map_incomplete_size
2163
+ self._clear_fn = gen_data_flow_ops.map_clear
2164
+
2165
  def put(self, key, vals, indices=None, name=None):
    """Create an op that stores the (key, vals) pair in the staging area.

    Incomplete puts are possible, preferably using a dictionary for vals
    as the appropriate dtypes and shapes can be inferred from the value names
    dictionary key values. If vals is a list or tuple, indices must
    also be specified so that the op knows at which element position
    to perform the insert.

    This operation will block if the capacity or memory limit of this
    container is reached.

    Args:
      key: Key associated with the data
      vals: Tensor (or a dict/tuple of Tensors) to place
        into the staging area.
      indices: (Optional) if vals is a tuple/list, this is required.
      name: A name for the operation (optional)

    Returns:
      The created op

    Raises:
      ValueError: If the number or type of inputs don't match the staging
        area.
    """

    with ops.name_scope(name, "%s_put" % self._name,
                        self._scope_vals(vals)) as scope:

      # Validates dtypes/shapes and, for dict input, infers the component
      # indices from the staging area's names.
      vals, indices = self._check_put_dtypes(vals, indices)

      # The stage op must run on the staging area's device.
      with ops.colocate_with(self._coloc_op):
        # _put_fn is the ordered or unordered map_stage kernel, chosen at
        # construction time.
        op = self._put_fn(
            key,
            indices,
            vals,
            dtypes=self._dtypes,
            shared_name=self._name,
            name=scope,
            capacity=self._capacity,
            memory_limit=self._memory_limit)
      return op
2208
+
2209
+ def _get_indices_and_dtypes(self, indices=None):
2210
+ if indices is None:
2211
+ indices = list(range(len(self._dtypes)))
2212
+
2213
+ if not isinstance(indices, (tuple, list)):
2214
+ raise TypeError(f"Invalid indices type {type(indices)}")
2215
+
2216
+ if len(indices) == 0:
2217
+ raise ValueError("Empty indices")
2218
+
2219
+ if all(isinstance(i, str) for i in indices):
2220
+ if self._names is None:
2221
+ raise ValueError(f"String indices provided {indices}, but "
2222
+ "this Staging Area was not created with names.")
2223
+
2224
+ try:
2225
+ indices = [self._names.index(n) for n in indices]
2226
+ except ValueError:
2227
+ raise ValueError(f"Named index not in "
2228
+ f"Staging Area names {self._names}")
2229
+ elif all(isinstance(i, int) for i in indices):
2230
+ pass
2231
+ else:
2232
+ raise TypeError(f"Mixed types in indices {indices}. "
2233
+ "May only be str or int")
2234
+
2235
+ dtypes = [self._dtypes[i] for i in indices]
2236
+
2237
+ return indices, dtypes
2238
+
2239
  def peek(self, key, indices=None, name=None):
    """Peeks at staging area data associated with the key.

    If the key is not in the staging area, it will block
    until the associated (key, value) is inserted.

    Args:
      key: Key associated with the required data
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices.
        String indices are only valid if the Staging Area
        has names associated with it.
      name: A name for the operation (optional)

    Returns:
      The created op
    """

    if name is None:
      # NOTE(review): the default op name uses the "_pop" suffix even
      # though this is a peek -- likely historical; confirm before renaming
      # since existing graphs may depend on the op name.
      name = "%s_pop" % self._name

    # Resolve partial-retrieval indices (or all components) and their
    # dtypes.
    indices, dtypes = self._get_indices_and_dtypes(indices)

    # Peek must run on the staging area's device.
    with ops.colocate_with(self._coloc_op):
      result = self._peek_fn(
          key,
          shared_name=self._name,
          indices=indices,
          dtypes=dtypes,
          name=name,
          capacity=self._capacity,
          memory_limit=self._memory_limit)

    return self._get_return_value(result, indices)
2273
+
2274
+ def get(self, key=None, indices=None, name=None):
2275
+ """If the key is provided, the associated (key, value) is returned from the staging area.
2276
+
2277
+ If the key is not in the staging area, this method will block until
2278
+ the associated (key, value) is inserted.
2279
+ If no key is provided and the staging area is ordered,
2280
+ the (key, value) with the smallest key will be returned.
2281
+ Otherwise, a random (key, value) will be returned.
2282
+
2283
+ If the staging area is empty when this operation executes,
2284
+ it will block until there is an element to dequeue.
2285
+
2286
+ Args:
2287
+ key: Key associated with the required data (Optional)
2288
+ indices: Partial list of tensors to retrieve (optional).
2289
+ A list of integer or string indices.
2290
+ String indices are only valid if the Staging Area
2291
+ has names associated with it.
2292
+ name: A name for the operation (optional)
2293
+
2294
+ Returns:
2295
+ The created op
2296
+ """
2297
+ if key is None:
2298
+ return self._popitem(indices=indices, name=name)
2299
+ else:
2300
+ return self._pop(key, indices=indices, name=name)
2301
+
2302
  def _pop(self, key, indices=None, name=None):
    """Remove and return the (key, value) pair associated with `key`.

    If the key is not in the staging area, this method will block until
    the associated (key, value) is inserted.

    Args:
      key: Key associated with the required data
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices.
        String indices are only valid if the Staging Area
        has names associated with it.
      name: A name for the operation (optional)

    Returns:
      A (key, value) tuple, where `value` is the created op's result
      wrapped as a tensor, list, or dict depending on this area's names.
    """
    if name is None:
      name = "%s_get" % self._name

    # Resolve partial-retrieval indices (or all components) and dtypes.
    indices, dtypes = self._get_indices_and_dtypes(indices)

    # The unstage op must run on the staging area's device.
    with ops.colocate_with(self._coloc_op):
      result = self._pop_fn(
          key,
          shared_name=self._name,
          indices=indices,
          dtypes=dtypes,
          name=name,
          capacity=self._capacity,
          memory_limit=self._memory_limit)

    # Echo the caller-supplied key back alongside the wrapped values.
    return key, self._get_return_value(result, indices)
2334
+
2335
  def _popitem(self, indices=None, name=None):
    """Remove and return a (key, value) pair from the staging area.

    If the staging area is ordered, the (key, value) with the smallest
    key will be returned. Otherwise, a random (key, value) will be
    returned.
    If the staging area is empty when this operation executes,
    it will block until there is an element to dequeue.

    Args:
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices.
        String indices are only valid if the Staging Area
        has names associated with it.
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    if name is None:
      name = "%s_get_nokey" % self._name

    indices, dtypes = self._get_indices_and_dtypes(indices)

    # Run the pop op on the same device as the staging area itself.
    with ops.colocate_with(self._coloc_op):
      key, result = self._popitem_fn(
          shared_name=self._name,
          indices=indices,
          dtypes=dtypes,
          name=name,
          capacity=self._capacity,
          memory_limit=self._memory_limit)

    # Separate keys and results out from
    # underlying namedtuple
    key = self._create_device_transfers(key)[0]
    result = self._get_return_value(result, indices)

    return key, result
2373
+
2374
+ def size(self, name=None):
2375
+ """Returns the number of elements in the staging area.
2376
+
2377
+ Args:
2378
+ name: A name for the operation (optional)
2379
+
2380
+ Returns:
2381
+ The created op
2382
+ """
2383
+ if name is None:
2384
+ name = "%s_size" % self._name
2385
+
2386
+ return self._size_fn(
2387
+ shared_name=self._name,
2388
+ name=name,
2389
+ dtypes=self._dtypes,
2390
+ capacity=self._capacity,
2391
+ memory_limit=self._memory_limit)
2392
+
2393
+ def incomplete_size(self, name=None):
2394
+ """Returns the number of incomplete elements in the staging area.
2395
+
2396
+ Args:
2397
+ name: A name for the operation (optional)
2398
+
2399
+ Returns:
2400
+ The created op
2401
+ """
2402
+ if name is None:
2403
+ name = "%s_incomplete_size" % self._name
2404
+
2405
+ return self._incomplete_size_fn(
2406
+ shared_name=self._name,
2407
+ name=name,
2408
+ dtypes=self._dtypes,
2409
+ capacity=self._capacity,
2410
+ memory_limit=self._memory_limit)
2411
+
2412
+ def clear(self, name=None):
2413
+ """Clears the staging area.
2414
+
2415
+ Args:
2416
+ name: A name for the operation (optional)
2417
+
2418
+ Returns:
2419
+ The created op
2420
+ """
2421
+ if name is None:
2422
+ name = "%s_clear" % self._name
2423
+
2424
+ return self._clear_fn(
2425
+ shared_name=self._name,
2426
+ name=name,
2427
+ dtypes=self._dtypes,
2428
+ capacity=self._capacity,
2429
+ memory_limit=self._memory_limit)
2430
+
2431
+
2432
class RecordInput:
  """RecordInput asynchronously reads and randomly yields TFRecords.

  A RecordInput Op will continuously read a batch of records asynchronously
  into a buffer of some fixed capacity. It can also asynchronously yield
  random records from this buffer.

  It will not start yielding until at least `buffer_size / 2` elements have been
  placed into the buffer so that sufficient randomization can take place.

  The order the files are read will be shifted each epoch by `shift_ratio` so
  that the data is presented in a different order every epoch.
  """

  def __init__(self,
               file_pattern,
               batch_size=1,
               buffer_size=1,
               parallelism=1,
               shift_ratio=0,
               seed=0,
               name=None,
               batches=None,
               compression_type=None):
    """Constructs a RecordInput Op.

    Args:
      file_pattern: File path to the dataset, possibly containing wildcards.
        All matching files will be iterated over each epoch.
      batch_size: How many records to return at a time.
      buffer_size: The maximum number of records the buffer will contain.
      parallelism: How many reader threads to use for reading from files.
      shift_ratio: What percentage of the total number of files to move the
        start file forward by each epoch.
      seed: Specify the random number seed used by generator that randomizes
        records.
      name: Optional name for the operation.
      batches: None by default, creating a single batch op. Otherwise specifies
        how many batches to create, which are returned as a list when
        `get_yield_op()` is called. An example use case is to split processing
        between devices on one computer.
      compression_type: The type of compression for the file. Currently ZLIB and
        GZIP are supported. Defaults to none.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    self._batch_size = batch_size
    if batches is not None:
      # The underlying op produces one flat batch of batch_size * batches
      # records; get_yield_op() splits it back into `batches` pieces.
      self._batch_size *= batches
    self._batches = batches
    self._file_pattern = file_pattern
    self._buffer_size = buffer_size
    self._parallelism = parallelism
    self._shift_ratio = shift_ratio
    self._seed = seed
    self._name = name
    self._compression_type = python_io.TFRecordCompressionType.NONE
    if compression_type is not None:
      self._compression_type = compression_type

  def get_yield_op(self):
    """Adds a node that yields a group of records every time it is executed.

    If RecordInput `batches` parameter is not None, it yields a list of
    record batches with the specified `batch_size`.
    """
    compression_type = python_io.TFRecordOptions.get_compression_type_string(
        python_io.TFRecordOptions(self._compression_type))
    records = gen_data_flow_ops.record_input(
        file_pattern=self._file_pattern,
        file_buffer_size=self._buffer_size,
        file_parallelism=self._parallelism,
        file_shuffle_shift_ratio=self._shift_ratio,
        batch_size=self._batch_size,
        file_random_seed=self._seed,
        compression_type=compression_type,
        name=self._name)
    if self._batches is None:
      return records
    else:
      with ops.name_scope(self._name):
        # Round-robin the flat batch of scalar records into `batches` lists.
        batch_list = [[] for _ in range(self._batches)]
        records = array_ops.split(records, self._batch_size, 0)
        for index, protobuf in enumerate(records):
          batch_index = index % self._batches
          batch_list[batch_index].append(array_ops.reshape(protobuf, []))
        return batch_list
videochat2/lib/python3.10/site-packages/tensorflow/python/ops/embedding_ops.py ADDED
@@ -0,0 +1,1184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+ """Operations for embeddings."""
16
+
17
+ from tensorflow.python.compat import compat
18
+ from tensorflow.python.framework import composite_tensor
19
+ from tensorflow.python.framework import constant_op
20
+ from tensorflow.python.framework import dtypes
21
+ from tensorflow.python.framework import indexed_slices
22
+ from tensorflow.python.framework import ops
23
+ from tensorflow.python.framework import sparse_tensor
24
+ from tensorflow.python.framework import tensor_shape
25
+ from tensorflow.python.ops import array_ops
26
+ from tensorflow.python.ops import array_ops_stack
27
+ from tensorflow.python.ops import clip_ops
28
+ # Imports gradient definitions.
29
+ from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
30
+ from tensorflow.python.ops import data_flow_ops
31
+ from tensorflow.python.ops import math_ops
32
+ from tensorflow.python.ops import resource_variable_ops
33
+ from tensorflow.python.ops import sparse_ops
34
+ from tensorflow.python.ops import variables
35
+ from tensorflow.python.types import core
36
+ from tensorflow.python.util import dispatch
37
+ from tensorflow.python.util.tf_export import tf_export
38
+
39
+
40
def _clip(params, ids, max_norm):
  """Helper function for _embedding_lookup_and_transform.

  This function optionally clips embeddings to an l2-norm of max_norm.

  Args:
    params: A `Tensor` of embeddings retrieved by `gather`.
    ids: The `ids` argument that was passed to `gather`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value.

  Returns:
    A `Tensor` with the same type as `params`.
  """

  def _rank(x):
    """Helper function to retrieve the rank of a tensor.

    Args:
      x: Something convertible to `Tensor`.

    Returns:
      Either a pair `(rank, True)` where `rank` is an integer or a pair
      `(rank, False)` where `rank` is an integer `Tensor`. In either case,
      `rank` is the rank of `x`.
    """
    rank = ops.convert_to_tensor(x).get_shape().ndims
    # Compare against None explicitly: a statically-known rank of 0 (a
    # scalar) is falsy, and a plain truthiness test would needlessly fall
    # back to computing the rank dynamically with `array_ops.rank`.
    if rank is not None:
      return rank, True
    else:
      return array_ops.rank(x), False

  if max_norm is None:
    return params
  ids_rank, ids_static = _rank(ids)
  params_rank, params_static = _rank(params)
  # Clip along the embedding axes only (those beyond the rank of `ids`);
  # use a static Python list of axes when both ranks are known statically.
  return clip_ops.clip_by_norm(
      params,
      max_norm,
      axes=(list(range(ids_rank, params_rank)) if ids_static and params_static
            else math_ops.range(ids_rank, params_rank)))
81
+
82
+
83
def _colocate_with(param):
  """Returns a colocation context for `param` that is tf.function-safe."""
  # Inside a function, `ops.colocate_with` would hard-code a device string
  # when `param.device` is known, which would then break serving. Capturing
  # the resource handle instead yields a tensor without a device.
  if ops.inside_function() and hasattr(param, "handle"):
    captured = ops.get_default_graph().capture(param.handle)
    return ops.colocate_with(captured)
  return ops.colocate_with(param)
91
+
92
+
93
def _embedding_lookup_and_transform(params,
                                    ids,
                                    partition_strategy="mod",
                                    name=None,
                                    max_norm=None,
                                    transform_fn=None):
  """Helper function for embedding_lookup and _compute_sampled_logits.

  This function is a generalization of embedding_lookup that optionally
  applies a caller-specified transformation to each embedding. This is
  done through the `transform_fn` argument. If provided, the function is
  applied to each partitioned tensor of retrieved embeddings, colocated
  with the embeddings. This function will be called with a single `Tensor`
  argument of the same type as the `params` tensor and should return a
  `Tensor`. The shape of the argument will be the same as `params` except
  for the size of the first dimension. The first dimension of the result's
  shape must be the same size as the argument's.

  Args:
    params: See embedding_lookup.
    ids: See embedding_lookup.
    partition_strategy: See embedding_lookup.
    name: See embedding_lookup.
    max_norm: See embedding_lookup.
    transform_fn: An optional function to apply to each retrieved embedding. If
      max_norm is provided, transform_fn is applied to the norm-limited
      embeddings.

  Returns:
    See embedding_lookup for details.
  Raises:
    ValueError: If `params` is empty.
  """
  if params is None:
    raise ValueError("params must be specified")
  if isinstance(params, (list, tuple)) and not params:
    raise ValueError("Length of params is currently 0. "
                     "Need at least one param.")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]

  with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
    np = len(params)  # Number of partitions
    # Preserve the resource variable status to avoid accidental dense reads.
    if not any(
        isinstance(p, resource_variable_ops.BaseResourceVariable)
        for p in params):
      params = indexed_slices.convert_n_to_tensor_or_indexed_slices(
          params, name="params")
    ids = ops.convert_to_tensor(ids, name="ids")
    if np == 1 and (not transform_fn or ids.get_shape().ndims == 1):
      # Fast path: a single partition needs no id remapping or stitching.
      with _colocate_with(params[0]):
        result = _clip(
            array_ops.gather(params[0], ids, name=name), ids, max_norm)
        if transform_fn:
          result = transform_fn(result)
      # Make sure the final result does not have colocation constraints on the
      # params. Similar to the case np > 1 where parallel_dynamic_stitch is
      # outside the scope of all with _colocate_with(params[p]).
      return array_ops.identity(result)
    else:
      # Flatten the ids. There are two cases where we need to do this.
      # - There is more than one params tensor.
      # - There is a transform_fn and ids is not statically known to be 1-D.
      #   We must flatten in this case because transform_fn expects a flat
      #   tensor of embeddings.
      flat_ids = array_ops.reshape(ids, [-1])
      original_indices = math_ops.range(array_ops.size(flat_ids))

      # Create p_assignments and set new_ids depending on the strategy.
      if partition_strategy == "mod":
        p_assignments = flat_ids % np
        new_ids = flat_ids // np
      elif partition_strategy == "div":
        # Compute num_total_ids as the sum of dim-0 of params, then assign to
        # partitions based on a constant number of ids per partition. Optimize
        # if we already know the full shape statically.
        dim_0_size = tensor_shape.Dimension(
            tensor_shape.dimension_value(params[0].get_shape()[0]))
        for p in range(1, np):
          dim_0_size += tensor_shape.Dimension(
              tensor_shape.dimension_value(params[p].get_shape()[0]))
        if dim_0_size.value:
          num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
        else:
          # Some partition sizes are only known at runtime; gather the
          # dynamic shapes (colocated to avoid data motion) and sum them.
          dim_0_sizes = []
          for p in range(np):
            param_p_dim = tensor_shape.dimension_value(params[p].get_shape()[0])
            if param_p_dim is not None:
              dim_0_sizes.append(param_p_dim)
            else:
              with _colocate_with(params[p]):
                dim_0_sizes.append(array_ops.shape(params[p])[0])
          num_total_ids = math_ops.reduce_sum(
              math_ops.cast(array_ops_stack.stack(dim_0_sizes), flat_ids.dtype))
        ids_per_partition = num_total_ids // np
        extras = num_total_ids % np

        # The first `extras` partitions hold `ids_per_partition + 1` ids.
        p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1),
                                         (flat_ids - extras) //
                                         ids_per_partition)

        # Emulate a conditional using a boolean indicator tensor
        new_ids = array_ops.where(p_assignments < extras,
                                  flat_ids % (ids_per_partition + 1),
                                  (flat_ids - extras) % ids_per_partition)
      else:
        raise ValueError(
            f"Unrecognized partition strategy: {partition_strategy}."
            "Must be one of either `mod` or `div`.")

      # Cast partition assignments to int32 for use in dynamic_partition.
      # There really should not be more than 2^32 partitions.
      p_assignments = math_ops.cast(p_assignments, dtypes.int32)
      # Partition list of ids based on assignments into np separate lists
      gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
      # Similarly, partition the original indices.
      pindices = data_flow_ops.dynamic_partition(original_indices,
                                                 p_assignments, np)
      # Do np separate lookups, finding embeddings for plist[p] in params[p]
      partitioned_result = []
      for p in range(np):
        pids = gather_ids[p]
        with ops.device_v2(None):
          with _colocate_with(params[p]):
            result = array_ops.gather(params[p], pids)
            if transform_fn:
              # If transform_fn is provided, the clip_by_norm precedes
              # the transform and hence must be co-located. See below
              # for the counterpart if transform_fn is not provided.
              result = transform_fn(_clip(result, pids, max_norm))
        partitioned_result.append(result)
      # Stitch these back together
      ret = data_flow_ops.parallel_dynamic_stitch(
          pindices, partitioned_result, name=name)

      # Determine the static element shape.
      if transform_fn is None:
        element_shape_s = params[0].get_shape()[1:]
        for p in params[1:]:
          element_shape_s = element_shape_s.merge_with(p.get_shape()[1:])
      else:
        element_shape_s = ret.get_shape()[1:]

      # Compute the dynamic element shape.
      if element_shape_s.is_fully_defined():
        element_shape_d = element_shape_s
      elif transform_fn is None:
        # It's important that we compute params[0].shape on the right device
        # to avoid data motion.
        with _colocate_with(params[0]):
          params_shape = array_ops.shape(params[0])
        element_shape_d = params_shape[1:]
      else:
        element_shape_d = array_ops.shape(ret)[1:]

      # Reshape to reverse the flattening of ids.
      ret = array_ops.reshape(
          ret, array_ops.concat([array_ops.shape(ids), element_shape_d], 0))

      # Normally the reshape is sufficient, but setting shape explicitly
      # teaches shape inference that params[1:].get_shape() matters
      # (in the case that transform_fn is None).
      ret.set_shape(ids.get_shape().concatenate(element_shape_s))
      if not transform_fn:
        # If transform_fn was provided, the clip_by_norm was done above.
        ret = _clip(ret, ids, max_norm)
      return ret
263
+
264
+
265
@tf_export(v1=["nn.embedding_lookup"])
@dispatch.add_dispatch_support
def embedding_lookup(
    params,
    ids,
    partition_strategy="mod",
    name=None,
    validate_indices=True,  # pylint: disable=unused-argument
    max_norm=None):
  """Looks up embeddings for the given `ids` from a list of tensors.

  This function is used to perform parallel lookups on the list of tensors in
  `params`. It is a generalization of `tf.gather`, where `params` is
  interpreted as a partitioning of a large embedding tensor. `params` may be
  a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()`
  with a partitioner.

  If `len(params) > 1`, each element `id` of `ids` is partitioned between
  the elements of `params` according to the `partition_strategy`.
  In all strategies, if the id space does not evenly divide the number of
  partitions, each of the first `(max_id + 1) % len(params)` partitions will
  be assigned one more id.

  If `partition_strategy` is `"mod"`, we assign each id to partition
  `p = id % len(params)`. For instance,
  13 ids are split across 5 partitions as:
  `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`

  If `partition_strategy` is `"div"`, we assign ids to partitions in a
  contiguous manner. In this case, 13 ids are split across 5 partitions as:
  `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`

  If the input ids are ragged tensors, partition variables are not supported and
  the partition strategy and the max_norm are ignored.
  The results of the lookup are concatenated into a dense
  tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.

  **Behavior Difference between CPU and GPU**

  Please note that when using `tf.nn.embedding_lookup` on a GPU, if an
  out-of-bound index is encountered, a value of 0 will be stored in the
  corresponding output value. On the other hand, when using
  `tf.nn.embedding_lookup` on a CPU, an error will be returned if an
  out-of-bound index is found.

  This behavior difference can impact the results of your computation,
  especially when dealing with indices that may go beyond the bounds of the
  tensor. Make sure to be mindful of this distinction when using the
  `tf.nn.embedding_lookup` function in your computations.

  **Usage Example**

  Here's an example demonstrating how to use `tf.nn.embedding_lookup`:

  ```python
  import tensorflow as tf

  # Example embedding matrix and indices
  embedding_matrix = tf.constant([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
  indices = tf.constant([1, 0, 2])

  # Perform embedding lookup
  embeddings = tf.nn.embedding_lookup(embedding_matrix, indices)

  # Print the result
  print("Embeddings:")
  print(embeddings.numpy())
  ```

  Args:
    params: A single tensor representing the complete embedding tensor, or a
      list of P tensors all of same shape except for the first dimension,
      representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    ids: A `Tensor` or a 'RaggedTensor' with type `int32` or `int64` containing
      the ids to be looked up in `params`.
      Caution: Out-of-bounds indices will result in undefined behavior, which
        will differ between devices and backends.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`.
    name: A name for the operation (optional).
    validate_indices: DEPRECATED. If this operation is assigned to CPU, values
      in `indices` are always validated to be within range. If assigned to GPU,
      out-of-bound indices result in safe but unspecified behavior, which may
      include raising an error.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value.

  Returns:
    A `Tensor` or a 'RaggedTensor', depending on the input, with the same type
    as the tensors in `params`.

  Raises:
    ValueError: If `params` is empty.
  """
  # NOTE: The CPU/GPU behavior notes and usage example used to live in a
  # second triple-quoted string after the docstring. That string was a dead
  # no-op expression statement, invisible to help() and doc generators, so
  # its content has been merged into the docstring above.
  return _embedding_lookup_and_transform(
      params=params,
      ids=ids,
      partition_strategy=partition_strategy,
      name=name,
      max_norm=max_norm,
      transform_fn=None)
371
+
372
+
373
@tf_export("nn.embedding_lookup", v1=[])
@dispatch.add_dispatch_support
def embedding_lookup_v2(params, ids, max_norm=None, name=None):
  """Looks up embeddings for the given `ids` from a list of tensors.

  This function is used to perform parallel lookups on the list of tensors in
  `params`. It is a generalization of `tf.gather`, where `params` is
  interpreted as a partitioning of a large embedding tensor.

  If `len(params) > 1`, each element `id` of `ids` is partitioned between the
  elements of `params` according to the "div" partition strategy, which means we
  assign ids to partitions in a contiguous manner. For instance, 13 ids are
  split across 5 partitions as:
  `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.

  If the id space does not evenly divide the number of partitions, each of the
  first `(max_id + 1) % len(params)` partitions will be assigned one more id.

  The results of the lookup are concatenated into a dense
  tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.

  Args:
    params: A single tensor representing the complete embedding tensor, or a
      list of tensors all of same shape except for the first dimension,
      representing sharded embedding tensors following "div" partition strategy.
    ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
      up in `params`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params`.

    For instance, if `params` is a 5x2 matrix:

    ```python
    [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    ```

    or a list of matrices:

    ```python
    params[0]: [[1, 2], [3, 4]]
    params[1]: [[5, 6], [7, 8]]
    params[2]: [[9, 10]]
    ```

    and `ids` is:

    ```python
    [0, 3, 4]
    ```

    The output will be a 3x2 matrix:

    ```python
    [[1, 2], [7, 8], [9, 10]]
    ```

  Raises:
    ValueError: If `params` is empty.
  """
  # The v2 API always uses the contiguous "div" partition strategy; delegate
  # to the v1 implementation with that strategy fixed.
  return embedding_lookup(
      params, ids, partition_strategy="div", name=name, max_norm=max_norm)
437
+
438
+
439
@tf_export(v1=["nn.embedding_lookup_sparse"])
@dispatch.add_dispatch_support
def embedding_lookup_sparse(
    params,
    sp_ids,
    sp_weights,
    partition_strategy="mod",
    name=None,
    combiner=None,
    max_norm=None,
    allow_fast_lookup=False,
):
  """Looks up embeddings for the given ids and weights from a list of tensors.

  This op assumes that there is at least one id for each row in the dense tensor
  represented by sp_ids (i.e. there are no rows with empty features), and that
  all the indices of sp_ids are in canonical row-major order.

  `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s or `RaggedTensor`s
  with rank of 2. For `SparseTensor`s with left-aligned non-zero entries which
  can be described as `RaggedTensor`s, use of `RaggedTensor`s can yield higher
  performance.

  It also assumes that all id values lie in the range [0, p0), where p0
  is the sum of the size of params along dimension 0.

  Args:
    params: A single tensor representing the complete embedding tensor, or a
      list of tensors all of same shape except for the first dimension,
      representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
      and M is arbitrary or a `RaggedTensor` with rank 2.
    sp_weights: `SparseTensor` or `RaggedTensor` of same type and shape as
      `sp_ids`, containing float / double weights corresponding to
      `sp_ids`, or `None` if all weights are assumed to be 1.0.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: Optional name for the op.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported. "sum" computes the weighted sum of the embedding
      results for each row. "mean" is the weighted sum divided by the total
      weight. "sqrtn" is the weighted sum divided by the square root of the sum
      of the squares of the weights. Defaults to `mean`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value, before combining.
    allow_fast_lookup: An optional boolean specifying whether to allow
      simplified embedding lookups when `params` is a single tensor and
      `max_norm` is `None`. Setting this flag to `True` during training can
      cause the use of dense gradients with increased memory footprint.

  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.

    In other words, if

    `shape(combined params) = [p0, p1, ..., pm]`

    and

    `shape(sp_ids) = shape(sp_weights) = [d0, d1]`

    then

    `shape(output) = [d0, p1, ..., pm]`.

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

    ```python
    [0, 0]: id 1, weight 2.0
    [0, 1]: id 3, weight 0.5
    [1, 0]: id 0, weight 1.0
    [2, 3]: id 1, weight 3.0
    ```

    with `combiner`="mean", then the output will be a 3x20 matrix where

    ```python
    output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
    output[1, :] = (params[0, :] * 1.0) / 1.0
    output[2, :] = (params[1, :] * 3.0) / 3.0
    ```

  Raises:
    TypeError: If `sp_ids` is not a `SparseTensor` or `RaggedTensor`, or if
      `sp_weights` is neither `None` nor of the same type as `sp_ids`.
    ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    combiner = "mean"
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError(
        f"combiner must be one of 'mean', 'sqrtn' or 'sum', got {combiner}")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  # NOTE(review): RaggedTensor inputs appear to be handled via the dispatch
  # decorator; this body only accepts SparseTensor — confirm against the
  # ragged dispatch registrations.
  if not isinstance(sp_ids, sparse_tensor.SparseTensor):
    raise TypeError(f"sp_ids must be SparseTensor, got {type(sp_ids)}")
  ignore_weights = sp_weights is None
  if not ignore_weights:
    if not isinstance(sp_weights, sparse_tensor.SparseTensor):
      # Fixed: the original message was missing a space ("SparseTensor,got").
      raise TypeError(f"sp_weights must be either None or SparseTensor, "
                      f"got {type(sp_weights)}")
    sp_ids.values.get_shape().assert_is_compatible_with(
        sp_weights.values.get_shape())
    sp_ids.indices.get_shape().assert_is_compatible_with(
        sp_weights.indices.get_shape())
    sp_ids.dense_shape.get_shape().assert_is_compatible_with(
        sp_weights.dense_shape.get_shape())
    # TODO(yleon): Add enhanced node assertions to verify that sp_ids and
    # sp_weights have equal indices and shapes.

  with ops.name_scope(name, "embedding_lookup_sparse",
                      params + [sp_ids]) as name:

    # Each value's row is its segment id for the combiner reduction.
    segment_ids = sp_ids.indices[:, 0]
    ids = sp_ids.values

    return embedding_lookup_sparse_impl(
        params,
        segment_ids,
        sp_weights,
        ids,
        combiner,
        ignore_weights,
        max_norm,
        allow_fast_lookup,
        partition_strategy,
        name,
    )
575
+
576
+
577
@tf_export("nn.embedding_lookup_sparse", v1=[])
@dispatch.add_dispatch_support
def embedding_lookup_sparse_v2(
    params,
    sp_ids,
    sp_weights,
    combiner=None,
    max_norm=None,
    name=None,
    allow_fast_lookup=False,
):
  """Looks up embeddings for the given ids and weights from a list of tensors.

  `params` is a dense tensor or a list of dense tensors, and `sp_ids` is a 2D
  `tf.SparseTensor` or `tf.RaggedTensor` indicating the indices of `params` to
  gather.

  This op is best described with an example. Suppose `params` is an embedding
  table of size `(4, 2)` and `sp_ids` has 3 rows. Since `sp_ids` is sparse or
  ragged, not every row has the same number of elements. The output has shape
  (3, 2). Each row of `sp_ids` is a list of indices, where each index selects a
  row of `params`. For a given row of `sp_ids`, the rows of `params` are
  gathered based on the indices in `sp_ids`, then combined by taking their sum
  or mean.

  >>> params = tf.constant([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=tf.float32)
  >>> sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 0]],
  ...                          values=[0, 1, 3, 2], dense_shape=(3, 2))
  >>> tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights=None,
  ...                               combiner='sum').numpy()
  array([[4., 6.], [7., 8.], [5., 6.]], dtype=float32)

  In this example, `sp_ids` has 3 rows, so the output has 3 rows. Row 0 of
  `sp_ids` has values 0 and 1, so it selects rows 0 and 1 from `params`, which
  are `[1, 2]` and `[3, 4]`. The rows are summed since `combiner='sum'`,
  resulting in the output row of `[4, 6]`.

  Since row 1 and 2 of `sp_ids` only have one value each, they simply select the
  corresponding row from `params` as the output row. Row 1 has value `3` so
  it selects the `params` elements `[7, 8]` and row 2 has the value 2 so it
  selects the `params` elements `[5, 6]`.

  If `sp_weights` is specified, it must have the same shape as `sp_ids`.
  `sp_weights` is used to assign a weight to each slice of `params`. For
  example:

  >>> params = tf.constant([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=tf.float32)
  >>> sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 0]],
  ...                          values=[0, 1, 3, 2], dense_shape=(3, 2))
  >>> sparse_weights = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 0]],
  ...                                  values=[0.1, 1.0, 0.5, 2.0],
  ...                                  dense_shape=(3, 2))
  >>> tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights=sparse_weights,
  ...                               combiner='sum').numpy()
  array([[3.1, 4.2], [3.5, 4.], [10., 12.]], dtype=float32)

  In general, `params` can have shape `(p0, ..., pn)` and `sp_ids` can have `M`
  rows, where each row can have any number of elements. The output has shape
  `(M, p1, ..., pn)`. Each slice of the output `output[i, ...]` is obtained as
  follows: The `combiner` argument is used to combine the values
  `params[sp_ids[i, j], ...] * sp_weights[i, j]` for each `j` in `range(0,
  len(sp_ids[i]))`, e.g. by taking the sum or mean of the values.

  This op assumes that there is at least one id for each row in the dense tensor
  represented by sp_ids (i.e. there are no rows with empty features), and that
  all the indices of sp_ids are in canonical row-major order.

  `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s or `RaggedTensor`s
  with rank of 2. For `SpareTensor`s with left-aligned non-zero entries which
  can be described as `RaggedTensor`s, use of `RaggedTensor`s can yield higher
  performance.

  This op assumes that all id values lie in the range [0, p0), where p0
  is `params.shape[0]`. If you want a version of this op that prunes id values
  less than 0, see `tf.nn.safe_embedding_lookup_sparse`

  If `len(params) > 1`, each element of `sp_ids` is partitioned between the
  elements of `params` according to the "div" partition strategy, which means we
  assign ids to partitions in a contiguous manner. For instance, 13 ids are
  split across 5 partitions as:
  `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.

  If the id space does not evenly divide the number of partitions, each of the
  first `(max_id + 1) % len(params)` partitions will be assigned one more id.

  Args:
    params: A single tensor representing the complete embedding tensor, or a
      list of tensors all of same shape except for the first dimension,
      representing sharded embedding tensors following "div" partition strategy.
    sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
      and M is arbitrary or a `RaggedTensor` with rank 2.
    sp_weights: `SparseTensor` or `RaggedTensor` of same type and shape as
      `sp_ids`, containing float / double weights corresponding to `sp_ids`,
      or `None` if all weights are assumed to be 1.0.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported. "sum" computes the weighted sum of the embedding
      results for each row. "mean" is the weighted sum divided by the total
      weight. "sqrtn" is the weighted sum divided by the square root of the sum
      of the squares of the weights. Defaults to `mean`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value, before combining.
    name: Optional name for the op.
    allow_fast_lookup: An optional boolean specifying whether to allow
      simplified embedding lookups when `params` is a single tensor and
      `max_norm` is `None`. Setting this flag to `True` during training can
      cause the use of dense gradients with increased memory footprint.

  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.

    In other words, if

      `shape(combined params) = [p0, p1, ..., pm]`

    and

      `shape(sp_ids) = shape(sp_weights) = [d0, d1]`

    then

      `shape(output) = [d0, p1, ..., pm]`.

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

      ```python
      [0, 0]: id 1, weight 2.0
      [0, 1]: id 3, weight 0.5
      [1, 0]: id 0, weight 1.0
      [2, 3]: id 1, weight 3.0
      ```

    with `combiner`="mean", then the output will be a 3x20 matrix where

      ```python
      output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
      output[1, :] = (params[0, :] * 1.0) / 1.0
      output[2, :] = (params[1, :] * 3.0) / 3.0
      ```

  Raises:
    TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
      neither `None` nor `SparseTensor`.
    ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
  """
  # The v2 API only supports the "div" partition strategy; delegate to the
  # v1 implementation with that strategy hard-wired in.
  return embedding_lookup_sparse(
      params,
      sp_ids,
      sp_weights,
      "div",
      name,
      combiner,
      max_norm,
      allow_fast_lookup,
  )
734
+
735
+
736
@tf_export("nn.safe_embedding_lookup_sparse", v1=[])
@dispatch.add_dispatch_support
def safe_embedding_lookup_sparse_v2(
    embedding_weights,
    sparse_ids,
    sparse_weights=None,
    combiner="mean",
    default_id=None,
    max_norm=None,
    name=None,
    allow_fast_lookup=False,
):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension, which may vary because the vocabulary size
  is not necessarily a multiple of the number of shards.

  This is similar to `tf.nn.embedding_lookup_sparse`, except that invalid IDs
  (< 0) are pruned from input IDs and weights, as are any IDs with
  non-positive weight. For an entry with no features, the embedding vector for
  `default_id` is returned, or the 0-vector if `default_id` is not supplied.
  See `tf.nn.embedding_lookup_sparse` for details of sparse embedding lookups
  in general.

  The ids and weights may be multi-dimensional `SparseTensor`s or
  `RaggedTensor`s with rank of 2. For `SpareTensor`s with left-aligned non-zero
  entries which can be described as `RaggedTensor`s, use of `RaggedTensor`s can
  yield higher performance.

  If `len(embedding_weights) > 1`, each element `id` of `ids` is partitioned
  between the elements of `embedding_weights` according to the "div" partition
  strategy: ids are assigned to partitions in a contiguous manner. For
  instance, 13 ids are split across 5 partitions as
  `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. If the id space does
  not evenly divide the number of partitions, each of the first
  `(max_id + 1) % len(embedding_weights)` partitions is assigned one more id.

  Args:
    embedding_weights: A single tensor representing the complete embedding
      tensor, or a list of tensors all of same shape except for the first
      dimension, representing sharded embedding tensors following "div"
      partition strategy.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids, where `d_0` is typically batch size, or a `RaggedTensor` with rank 2.
    sparse_weights: `SparseTensor` or `RaggedTensor` of same type and shape as
      `sparse_ids`, containing float weights corresponding to `sparse_ids`, or
      `None` if all weights are assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
      default.
    default_id: The id to use for an entry with no features. Defaults to
      0-vector.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
      combining.
    name: A name for this operation (optional).
    allow_fast_lookup: An optional boolean specifying whether to allow
      simplified embedding lookups when `params` is a single tensor and
      `max_norm` is `None`. Setting this flag to `True` during training can
      cause the use of dense gradients with increased memory footprint.

  Returns:
    A dense tensor representing the combined embeddings for the sparse ids.
    For each row in the dense tensor represented by `sparse_ids`, the op looks
    up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines them as specified by `combiner`. If
    `shape(combined embedding_weights) = [p0, p1, ..., pm]` and
    `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]`, then
    `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`.

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

      ```python
      [0, 0]: id 1, weight 2.0
      [0, 1]: id 3, weight 0.5
      [1, 0]: id -1, weight 1.0
      [2, 3]: id 1, weight 3.0
      ```

    `default_id` is 0.

    with `combiner`="mean", then the output will be a 3x20 matrix where

      ```python
      output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
      output[1, :] = (params[0, :] * 1.0) / 1.0
      output[2, :] = (params[1, :] * 3.0) / 3.0
      ```

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  # The v2 API pins the partition strategy to "div"; all other arguments are
  # forwarded verbatim to the v1 implementation.
  forwarded = dict(
      sparse_weights=sparse_weights,
      combiner=combiner,
      default_id=default_id,
      name=name,
      partition_strategy="div",
      max_norm=max_norm,
      allow_fast_lookup=allow_fast_lookup,
  )
  return safe_embedding_lookup_sparse(embedding_weights, sparse_ids,
                                      **forwarded)
850
+
851
+
852
@tf_export(v1=["nn.safe_embedding_lookup_sparse"])
@dispatch.add_dispatch_support
def safe_embedding_lookup_sparse(
    embedding_weights,
    sparse_ids,
    sparse_weights=None,
    combiner="mean",
    default_id=None,
    name=None,
    partition_strategy="div",
    max_norm=None,
    allow_fast_lookup=False,
):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
  may be a `PartitionedVariable` as returned by using
  `tf.compat.v1.get_variable()` with a
  partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional `SparseTensor`s or
  `RaggedTensor`s with rank of 2. For `SpareTensor`s with left-aligned non-zero
  entries which can be described as `RaggedTensor`s, use of `RaggedTensor`s can
  yield higher performance. Embeddings are always aggregated along the last
  dimension.

  Args:
    embedding_weights: A single tensor representing the complete embedding
      tensor, or a list tensors all of same shape except for the first
      dimension, representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids, where `d_0` is typically batch size, or a `RaggedTensor` with rank 2.
    sparse_weights: `SparseTensor` or `RaggedTensor` of same type and shape as
      `sparse_ids`, containing float weights corresponding to `sparse_ids`, or
      `None` if all weights are assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
      default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy. Currently
      `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
      combining.
    allow_fast_lookup: An optional boolean specifying whether to allow
      simplified embedding lookups when `params` is a single tensor and
      `max_norm` is `None`. Setting this flag to `True` during training can
      cause the use of dense gradients with increased memory footprint.

  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.

    In other words, if

      `shape(combined embedding_weights) = [p0, p1, ..., pm]`

    and

      `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]`

    then

      `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`.

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

      ```python
      [0, 0]: id 1, weight 2.0
      [0, 1]: id 3, weight 0.5
      [1, 0]: id -1, weight 1.0
      [2, 3]: id 1, weight 3.0
      ```

    `default_id` is 0.

    with `combiner`="mean", then the output will be a 3x20 matrix where

      ```python
      output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
      output[1, :] = (params[0, :] * 1.0) / 1.0
      output[2, :] = (params[1, :] * 3.0) / 3.0
      ```

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  # Validate and normalize `embedding_weights` into a non-empty Python list.
  if embedding_weights is None:
    raise ValueError(f"Missing embedding_weights {embedding_weights}.")
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError(f"Missing embedding_weights {embedding_weights}.")

  # Match the weights' dtype (if given); resource variables whose dtype
  # already agrees are kept as-is to avoid materializing a dense copy.
  dtype = sparse_weights.dtype if sparse_weights is not None else None
  embedding_weights = [
      w if (resource_variable_ops.is_resource_variable(w)
            and dtype in (None, w.dtype))
      else ops.convert_to_tensor(w, dtype=dtype)
      for w in embedding_weights
  ]

  with ops.name_scope(name, "embedding_lookup", embedding_weights +
                      [sparse_ids, sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0])
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim is None else original_rank_dim)
    # Collapse all leading dimensions into one so the lookup operates on a
    # rank-2 [num_rows, last_dim] sparse tensor.
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)
    ])
    if sparse_weights is not None:
      # Rebuild the weights on the reshaped ids' indices/shape so the two
      # sparse tensors stay aligned entry-for-entry.
      sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
                                                  sparse_weights.values,
                                                  sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
    if combiner != "sum":
      # Non-positive weights would poison the mean/sqrtn denominators, so
      # drop them; for "sum" a zero weight is harmless.
      sparse_ids, sparse_weights = _prune_invalid_weights(
          sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
        sparse_ids, default_id or 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        # Only attach the scope name here when no post-processing follows;
        # otherwise the final `where` below carries the scope name.
        name=None if default_id is None else scope,
        max_norm=max_norm,
        allow_fast_lookup=allow_fast_lookup,
    )

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops_stack.stack([1, array_ops.shape(result)[1]]))

      # Rows that had no features get the 0-vector instead of the dummy
      # embedding looked up for the fill value.
      result = array_ops.where(
          is_row_empty, array_ops.zeros_like(result), result, name=scope)

    # Reshape back from linear ids back into higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    # Restore whatever static rank information survives the round trip.
    final_result.set_shape(
        tensor_shape.unknown_shape(
            (tensor_shape.Dimension(original_rank_dim) - 1).value
        ).concatenate(result.get_shape()[1:])
    )
    return final_result
1033
+
1034
+
1035
def embedding_lookup_sparse_impl(
    params,
    segment_ids,
    sp_weights,
    ids,
    combiner,
    ignore_weights,
    max_norm,
    allow_fast_lookup,
    partition_strategy,
    name,
):
  """Implementation of sparse embedding aggregation.

  Args:
    params: List of embedding tensors/variables (already list-normalized by
      the caller).
    segment_ids: 1-D tensor assigning each entry of `ids` to an output row.
    sp_weights: `SparseTensor` of per-id weights; only its `.values` are read,
      and only when `ignore_weights` is False.
    ids: 1-D tensor of ids to look up.
    combiner: One of "sum", "mean", "sqrtn".
    ignore_weights: True when all weights are implicitly 1.0.
    max_norm: Optional l2-norm clip forwarded to `embedding_lookup`.
    allow_fast_lookup: Allow the simplified single-tensor lookup path.
    partition_strategy: Partitioning strategy forwarded to `embedding_lookup`.
    name: Optional name for the final combining op.

  Returns:
    A dense tensor with one combined embedding row per segment.
  """
  need_sparse_segment_gradient = False
  # Ensure we can query the devices below.
  segment_ids = ops.convert_to_tensor(segment_ids, name="segment_ids")
  if len(params) == 1 and not isinstance(
      params[0], (core.Tensor, composite_tensor.CompositeTensor)
  ):
    params = [ops.convert_to_tensor(params[0], name="params")]
  # Note that if the params are on a different device (e.g., CPU), we must use
  # embedding_lookup() so that the gather operation is colocated with them.
  # Fast path: a single co-located dense table with no norm clipping can be
  # indexed directly, skipping the unique+embedding_lookup round trip.
  if (
      len(params) == 1
      and not isinstance(params[0], composite_tensor.CompositeTensor)
      and params[0].device == segment_ids.device
      and max_norm is None
      and (
          allow_fast_lookup
          or (ignore_weights and compat.forward_compatible(2023, 9, 26))
      )
  ):
    # On the fast path `ids` index `embeddings` directly.
    idx = ids
    embeddings = params[0]
    if isinstance(embeddings, resource_variable_ops.BaseResourceVariable):
      # Avoid a redundant copy due to copy-on-read semantics for
      # sparsely-updated variables.
      embeddings = embeddings.read_value_no_copy()
    if not allow_fast_lookup:
      # Request sparse (IndexedSlices-style) gradients from the
      # sparse_segment_* ops below instead of dense ones.
      need_sparse_segment_gradient = True
  else:
    # Deduplicate ids before the (potentially expensive) lookup; `idx` maps
    # each original entry back to its row in `embeddings`.
    ids, idx = array_ops.unique(ids)
    embeddings = embedding_lookup(
        params, ids, partition_strategy=partition_strategy, max_norm=max_norm
    )

  if not ignore_weights:
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)

    weights = sp_weights.values
    # Expand back to one embedding row per (possibly duplicated) input id.
    embeddings = array_ops.gather(embeddings, idx)

    original_dtype = embeddings.dtype
    if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
      # Cast low-precision embeddings to float32 during the computation to
      # avoid numerical issues.
      embeddings = math_ops.cast(embeddings, dtypes.float32)
    if weights.dtype != embeddings.dtype:
      weights = math_ops.cast(weights, embeddings.dtype)

    # Reshape weights to allow broadcast
    ones_shape = array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0)
    ones = array_ops.ones(ones_shape, dtype=dtypes.int32)
    bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)

    orig_weights_shape = weights.get_shape()
    weights = array_ops.reshape(weights, bcast_weights_shape)

    # Set the weight shape, since after reshaping to bcast_weights_shape,
    # the shape becomes None.
    if embeddings.get_shape().ndims is not None:
      weights.set_shape(
          orig_weights_shape.concatenate(
              [1 for _ in range(embeddings.get_shape().ndims - 1)]
          )
      )

    embeddings *= weights

    if combiner == "sum":
      embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
    elif combiner == "mean":
      # Weighted sum divided by the total weight per segment.
      embeddings = math_ops.segment_sum(embeddings, segment_ids)
      weight_sum = math_ops.segment_sum(weights, segment_ids)
      embeddings = math_ops.div_no_nan(embeddings, weight_sum, name=name)
    elif combiner == "sqrtn":
      # Weighted sum divided by sqrt of the sum of squared weights.
      embeddings = math_ops.segment_sum(embeddings, segment_ids)
      weights_squared = math_ops.pow(weights, 2)
      weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
      weight_sum_sqrt = math_ops.sqrt(weight_sum)
      embeddings = math_ops.div_no_nan(embeddings, weight_sum_sqrt, name=name)
    else:
      assert False, "Unrecognized combiner"
    if embeddings.dtype != original_dtype:
      embeddings = math_ops.cast(embeddings, original_dtype)
  else:
    # Unweighted path: the sparse_segment_* ops fuse the gather-by-idx and
    # the per-segment reduction into one op.
    if segment_ids.dtype not in (dtypes.int32, dtypes.int64):
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    assert idx is not None
    if combiner == "sum":
      embeddings = math_ops.sparse_segment_sum(
          embeddings,
          idx,
          segment_ids,
          name=name,
          sparse_gradient=need_sparse_segment_gradient,
      )
    elif combiner == "mean":
      embeddings = math_ops.sparse_segment_mean(
          embeddings,
          idx,
          segment_ids,
          name=name,
          sparse_gradient=need_sparse_segment_gradient,
      )
    elif combiner == "sqrtn":
      embeddings = math_ops.sparse_segment_sqrt_n(
          embeddings,
          idx,
          segment_ids,
          name=name,
          sparse_gradient=need_sparse_segment_gradient,
      )
    else:
      assert False, "Unrecognized combiner"

  return embeddings
1163
+
1164
+
1165
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  keep_mask = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is None:
    return sparse_ops.sparse_retain(sparse_ids, keep_mask), None
  # AND with an all-True mask over the weights' values; logically a no-op,
  # presumably forces the two value tensors to have matching shapes — TODO
  # confirm.
  keep_mask = math_ops.logical_and(
      keep_mask,
      array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  pruned_ids = sparse_ops.sparse_retain(sparse_ids, keep_mask)
  pruned_weights = sparse_ops.sparse_retain(sparse_weights, keep_mask)
  return pruned_ids, pruned_weights
1176
+
1177
+
1178
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  if sparse_weights is None:
    # Nothing to prune on; pass both through untouched.
    return sparse_ids, sparse_weights
  positive_mask = math_ops.greater(sparse_weights.values, 0)
  pruned_ids = sparse_ops.sparse_retain(sparse_ids, positive_mask)
  pruned_weights = sparse_ops.sparse_retain(sparse_weights, positive_mask)
  return pruned_ids, pruned_weights