id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
8,962 | import itertools
from typing import List, Tuple
import torch
import torch._prims_common as utils
The provided code snippet includes necessary dependencies for implementing the `fill_defaults` function. Write a Python function `def fill_defaults(args, n, defaults_tail)` to solve the following problem:
__torch_dispatch__ doesn't guarantee the number of arguments you are passed (e.g., defaulted arguments are not passed); but usually it is convenient to pad out the arguments list with defaults. This function helps you do that. Args: args: the list of positional arguments passed to __torch_dispatch__ n: the number of arguments you are expecting to get defaults_tail: default values for the arguments, starting from the end of the list Example: >>> fill_defaults([1, 2, 3], 5, [3, 4, 5]) [1, 2, 3, 4, 5] >>> fill_defaults([1, 2, 3], 5, [None, None, None]) [1, 2, 3, None, None]
Here is the function:
def fill_defaults(args, n, defaults_tail):
    """
    Pad a positional-argument list out to ``n`` entries using trailing defaults.

    __torch_dispatch__ doesn't guarantee the number of arguments you are
    passed (e.g., defaulted arguments are not passed); but usually it is
    convenient to pad out the arguments list with defaults.  This function
    helps you do that.

    Args:
        args: the list of positional arguments passed to __torch_dispatch__
        n: the number of arguments you are expecting to get
        defaults_tail: default values for the arguments, starting from the
            end of the list

    Raises:
        RuntimeError: if ``args`` plus ``defaults_tail`` cannot cover ``n``
            positions, i.e. there are not enough defaults to fill the gap.

    Example:
        >>> fill_defaults([1, 2, 3], 5, [3, 4, 5])
        [1, 2, 3, 4, 5]
        >>> fill_defaults([1, 2, 3], 5, [None, None, None])
        [1, 2, 3, None, None]
    """
    if n - len(defaults_tail) > len(args):
        raise RuntimeError("not enough defaults to fill arguments")
    r = list(args)
    # Extend with exactly the missing suffix of defaults_tail (defaults are
    # aligned to the END of the n-slot argument list).
    missing = n - len(r)
    if missing > 0:
        r.extend(defaults_tail[len(defaults_tail) - missing:])
    return r
8,963 | import itertools
from typing import List, Tuple
import torch
import torch._prims_common as utils
def find_arg_of_type(it, t):
    """Return the first element of ``it`` that is an instance of ``t``.

    Returns None when nothing matches (including for an empty iterable).
    """
    return next((candidate for candidate in it if isinstance(candidate, t)), None)
8,964 | import copy
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import (
Any,
cast,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from torch import nn, Tensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
Multistreamable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.sharding.cw_sharding import CwPooledEmbeddingSharding
from torchrec.distributed.sharding.dp_sharding import DpPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import RwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import TwPooledEmbeddingSharding
from torchrec.distributed.sharding.twcw_sharding import TwCwPooledEmbeddingSharding
from torchrec.distributed.sharding.twrw_sharding import TwRwPooledEmbeddingSharding
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
LazyAwaitable,
NullShardedModuleContext,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
append_prefix,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
PermutePooledEmbeddings,
)
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
)
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# Best-effort load of the fbgemm_gpu custom-operator libraries (Buck target
# paths).  A missing/unbuilt library raises OSError, which is deliberately
# swallowed so environments without fbgemm builds can still import the module.
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
except OSError:
    pass
def _pin_and_move(tensor: torch.Tensor, device: torch.device) -> torch.Tensor:
return (
tensor
if device.type == "cpu"
else tensor.pin_memory().to(device=device, non_blocking=True)
) | null |
8,965 | import copy
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import (
Any,
cast,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from torch import nn, Tensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
Multistreamable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.sharding.cw_sharding import CwPooledEmbeddingSharding
from torchrec.distributed.sharding.dp_sharding import DpPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import RwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import TwPooledEmbeddingSharding
from torchrec.distributed.sharding.twcw_sharding import TwCwPooledEmbeddingSharding
from torchrec.distributed.sharding.twrw_sharding import TwRwPooledEmbeddingSharding
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
LazyAwaitable,
NullShardedModuleContext,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
append_prefix,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
PermutePooledEmbeddings,
)
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
)
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# Best-effort load of the fbgemm_gpu custom-operator libraries (Buck target
# paths).  A missing/unbuilt library raises OSError, which is deliberately
# swallowed so environments without fbgemm builds can still import the module.
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
except OSError:
    pass
def replace_placement_with_meta_device(
    sharding_infos: List[EmbeddingShardingInfo],
) -> None:
    """Rewrite every shard placement so its device is the meta device.

    Placement device and tensor device could be unmatched in some
    scenarios, e.g. passing meta device to DMP and passing cuda
    to EmbeddingShardingPlanner. We need to make device consistent
    after getting sharding planner.
    """
    meta_device = torch.device("meta")
    for sharding_info in sharding_infos:
        sharding_spec = sharding_info.param_sharding.sharding_spec
        if sharding_spec is None:
            continue
        if not isinstance(sharding_spec, EnumerableShardingSpec):
            # We only support EnumerableShardingSpec at present.
            raise RuntimeError(
                f"Unsupported ShardingSpec {type(sharding_spec)} with meta device"
            )
        for shard_metadata in sharding_spec.shards:
            placement = shard_metadata.placement
            # Placements may arrive as "rank:N/device" strings; normalize first.
            if isinstance(placement, str):
                placement = torch.distributed._remote_device(placement)
            assert isinstance(placement, torch.distributed._remote_device)
            placement._device = meta_device
            shard_metadata.placement = placement
class EmbeddingShardingContext(Multistreamable):
    """Per-call bookkeeping for embedding sharding (batch sizes per rank/feature)."""

    # Torch Dynamo does not support default_factory=list:
    # https://github.com/pytorch/pytorch/issues/120108
    # TODO(ivankobzarev) Make this a dataclass once supported
    def __init__(
        self,
        batch_size_per_rank: Optional[List[int]] = None,
        batch_size_per_rank_per_feature: Optional[List[List[int]]] = None,
        batch_size_per_feature_pre_a2a: Optional[List[int]] = None,
        variable_batch_per_feature: bool = False,
    ) -> None:
        super().__init__()
        # `None` defaults avoid the shared-mutable-default pitfall; a fresh
        # list is created per instance, while caller-provided lists are kept
        # by reference.
        self.batch_size_per_rank: List[int] = (
            [] if batch_size_per_rank is None else batch_size_per_rank
        )
        self.batch_size_per_rank_per_feature: List[List[int]] = (
            []
            if batch_size_per_rank_per_feature is None
            else batch_size_per_rank_per_feature
        )
        self.batch_size_per_feature_pre_a2a: List[int] = (
            []
            if batch_size_per_feature_pre_a2a is None
            else batch_size_per_feature_pre_a2a
        )
        self.variable_batch_per_feature: bool = variable_batch_per_feature

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Only plain Python ints are held here -- no CUDA memory whose
        # lifetime must be tied to the stream -- so this is a no-op.
        pass
class EmbeddingSharding(abc.ABC, Generic[C, F, T, W], FeatureShardingMixIn):
    """
    Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
    table_wise.
    """

    # NOTE(review): upstream likely marks the stub methods below with
    # @abc.abstractmethod; decorators appear to have been lost in extraction --
    # confirm against the original source before relying on ABC enforcement.
    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        # Optional registry of per-comm-op quantization codecs.
        self._qcomm_codecs_registry = qcomm_codecs_registry

    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # Accessor for the registry supplied at construction time.
        return self._qcomm_codecs_registry

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[F]:
        # Stub: subclasses build the module that routes input features to ranks.
        pass

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[C, T, W]:
        # Stub: subclasses build the module that gathers embedding outputs.
        pass

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[F, T]:
        # Stub: subclasses build the local embedding lookup module.
        pass

    def embedding_dims(self) -> List[int]:
        # Stub: per-embedding output dimensions.
        pass

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        # Stub: shard metadata per embedding (None where unsharded).
        pass

    def embedding_names(self) -> List[str]:
        # Stub: flattened embedding names.
        pass

    def embedding_names_per_rank(self) -> List[List[str]]:
        # Stub: embedding names grouped by owning rank.
        pass

    def embedding_tables(self) -> List[ShardedEmbeddingTable]:
        # Explicitly unsupported by default (unlike the pass-stubs above).
        raise NotImplementedError

    def uncombined_embedding_dims(self) -> List[int]:
        # Default: no dim combining -- same as embedding_dims().
        return self.embedding_dims()

    def uncombined_embedding_names(self) -> List[str]:
        # Default: no name combining -- same as embedding_names().
        return self.embedding_names()
class EmbeddingShardingInfo:
    # NOTE(review): bare annotated fields (one with a default) suggest this was
    # an upstream @dataclass whose decorator was lost in extraction -- confirm
    # before relying on a generated __init__.
    embedding_config: EmbeddingTableConfig  # table-level configuration
    param_sharding: ParameterSharding  # how this table's parameter is sharded
    param: torch.Tensor  # the parameter tensor itself
    fused_params: Optional[Dict[str, Any]] = None  # optional fused-kernel args
class CwPooledEmbeddingSharding(
    BaseCwEmbeddingSharding[
        EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Shards embedding bags column-wise, i.e. a given embedding table is partitioned
    along its columns and placed on specified ranks.
    """

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        # Column shards are routed like per-rank tables, so the table-wise
        # sparse-features distributor is reused here.
        assert self._pg is not None
        return TwSparseFeaturesDist(
            self._pg,
            self.features_per_rank(),
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        # Local lookup over this rank's grouped table configs; falls back to
        # the sharding's default device when none is given.
        return GroupedPooledEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=device if device is not None else self._device,
            feature_processor=feature_processor,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[EmbeddingShardingContext, torch.Tensor, torch.Tensor]:
        device = device if device is not None else self._device
        embedding_permute_op: Optional[PermutePooledEmbeddingsSplit] = None
        callbacks: Optional[List[Callable[[torch.Tensor], torch.Tensor]]] = None
        # Only install a permute callback when permutation is requested AND the
        # embedding order is not already the identity.
        if self._permute_embeddings and self._embedding_order != list(
            range(len(self._embedding_order))
        ):
            assert len(self._embedding_order) == len(self._embedding_dims)
            embedding_permute_op = PermutePooledEmbeddingsSplit(
                self._embedding_dims, self._embedding_order, device=device
            )
            callbacks = [embedding_permute_op]
        assert self._pg is not None
        # Output distribution itself reuses the table-wise pooled dist, with the
        # optional permute applied as a callback.
        return TwPooledEmbeddingDist(
            pg=self._pg,
            dim_sum_per_rank=self._dim_sum_per_rank(),
            emb_dim_per_rank_per_feature=self._emb_dim_per_rank_per_feature(),
            device=device,
            callbacks=callbacks,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class DpPooledEmbeddingSharding(
    BaseDpEmbeddingSharding[
        EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Data-parallel sharding for embedding bags: tables are not split at all --
    every rank holds a full replica.
    """

    def create_input_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        # Replicated tables need no cross-rank feature redistribution.
        return DpSparseFeaturesDist()

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        target_device = self._device if device is None else device
        return GroupedPooledEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._env.process_group,
            device=target_device,
            feature_processor=feature_processor,
            # For data parallel we need to turn always gradient scaling in for weights
            # because get_gradient_scaling from comm_ops only affects model_parallel tables, not DP
            scale_weight_gradients=False,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[EmbeddingShardingContext, torch.Tensor, torch.Tensor]:
        # Outputs are already local on every rank.
        return DpPooledEmbeddingDist()
class RwPooledEmbeddingSharding(
    BaseRwEmbeddingSharding[
        EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Row-wise sharding for embedding bags: each table is split evenly by rows,
    with one slice placed on every rank.
    """

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        target_device = self._device if device is None else device
        return RwSparseFeaturesDist(
            # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
            # `Optional[ProcessGroup]`.
            pg=self._pg,
            num_features=self._get_num_features(),
            feature_hash_sizes=self._get_feature_hash_sizes(),
            device=target_device,
            is_sequence=False,
            has_feature_processor=self._has_feature_processor,
            need_pos=self._need_pos,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        target_device = self._device if device is None else device
        return GroupedPooledEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=target_device,
            feature_processor=feature_processor,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[EmbeddingShardingContext, torch.Tensor, torch.Tensor]:
        return RwPooledEmbeddingDist(
            # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
            # `Optional[ProcessGroup]`.
            self._pg,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
            embedding_dims=self.embedding_dims(),
        )
class TwPooledEmbeddingSharding(
    BaseTwEmbeddingSharding[
        EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Table-wise sharding for embedding bags: every table is placed whole on
    exactly one selected rank.
    """

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        # Route each feature's batch to the rank that owns its table.
        assert self._pg is not None
        return TwSparseFeaturesDist(
            self._pg,
            self.features_per_rank(),
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        target_device = self._device if device is None else device
        return GroupedPooledEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=target_device,
            feature_processor=feature_processor,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[EmbeddingShardingContext, torch.Tensor, torch.Tensor]:
        assert self._pg is not None
        target_device = self._device if device is None else device
        return TwPooledEmbeddingDist(
            pg=self._pg,
            dim_sum_per_rank=self._dim_sum_per_rank(),
            emb_dim_per_rank_per_feature=self._emb_dim_per_rank_per_feature(),
            device=target_device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class TwCwPooledEmbeddingSharding(CwPooledEmbeddingSharding):
    """
    Shards embedding bags table-wise column-wise, i.e. a given embedding table is
    partitioned along its columns and the table slices are placed on all ranks
    within a host group.
    """

    def __init__(
        self,
        sharding_infos: List[EmbeddingShardingInfo],
        env: ShardingEnv,
        device: Optional[torch.device] = None,
        permute_embeddings: bool = False,
        qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None,
    ) -> None:
        # Thin subclass: all behavior is inherited from CwPooledEmbeddingSharding;
        # presumably it exists so TWCW can be selected as a distinct sharding
        # type -- confirm against the sharder registry.
        super().__init__(
            sharding_infos,
            env,
            device,
            permute_embeddings=permute_embeddings,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
class TwRwPooledEmbeddingSharding(
    BaseTwRwEmbeddingSharding[
        EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Table-wise-then-row-wise sharding: tables are assigned to nodes, then split
    by rows across the ranks within the owning node.
    """

    def create_input_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        per_rank_features = self._features_per_rank(
            self._grouped_embedding_configs_per_rank
        )
        hash_sizes = self._get_feature_hash_sizes()
        assert self._pg is not None
        assert self._intra_pg is not None
        return TwRwSparseFeaturesDist(
            pg=self._pg,
            local_size=self._intra_pg.size(),
            features_per_rank=per_rank_features,
            feature_hash_sizes=hash_sizes,
            device=self._device if device is None else device,
            has_feature_processor=self._has_feature_processor,
            need_pos=self._need_pos,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        # Each rank only looks up the table groups assigned to it.
        return GroupedPooledEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs_per_rank[self._rank],
            pg=self._pg,
            device=self._device if device is None else device,
            feature_processor=feature_processor,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[EmbeddingShardingContext, torch.Tensor, torch.Tensor]:
        # Output distribution uses both the intra-node and cross-node groups.
        return TwRwPooledEmbeddingDist(
            rank=self._rank,
            cross_pg=cast(dist.ProcessGroup, self._cross_pg),
            intra_pg=cast(dist.ProcessGroup, self._intra_pg),
            dim_sum_per_node=self._dim_sum_per_node(),
            emb_dim_per_node_per_feature=self._emb_dim_per_node_per_feature(),
            device=self._device if device is None else device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class ShardingType(Enum):
    """
    Well-known sharding types, used by inter-module optimizations.
    """

    # Replicated on all ranks
    DATA_PARALLEL = "data_parallel"
    # Placed on a single rank
    TABLE_WISE = "table_wise"
    # Placed on multiple ranks as different sharded tables
    COLUMN_WISE = "column_wise"
    # Range-split on the first dimension across all ranks
    ROW_WISE = "row_wise"
    # Row-wise on the same node and table-wise across nodes
    # Useful when having multiple ranks per node
    # and comms within a single node are more efficient than across nodes.
    TABLE_ROW_WISE = "table_row_wise"
    # Column-wise on the same node and table-wise across nodes
    TABLE_COLUMN_WISE = "table_column_wise"
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): bare annotated fields with call-expression defaults suggest
    # an upstream @dataclass (with default_factory) whose decorator was lost in
    # extraction -- confirm; as written these are shared class attributes.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()  # forward-pass codec
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()  # backward-pass codec
class ShardingEnv:
    """
    Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
    enables `DistributedModelParallel` to be used during inference.

    Attributes:
        world_size: total number of ranks in the environment.
        rank: this process's rank.
        process_group: backing ProcessGroup, or None for local/inference use.
    """

    def __init__(
        self,
        world_size: int,
        rank: int,
        pg: Optional[dist.ProcessGroup] = None,
    ) -> None:
        self.world_size = world_size
        self.rank = rank
        self.process_group: Optional[dist.ProcessGroup] = pg

    # Fix: both alternate constructors call `cls(...)` and were clearly meant
    # to be classmethods; without the decorator, a call through the class
    # would bind the first positional argument into `cls`.
    @classmethod
    def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
        """
        Creates ProcessGroup-based sharding environment.

        NOTE:
            Typically used during training.
        """
        return cls(dist.get_world_size(pg), dist.get_rank(pg), pg)

    @classmethod
    def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
        """
        Creates a local host-based sharding environment.

        NOTE:
            Typically used during single host inference.
        """
        return cls(world_size, rank, None)
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
    self,
    keys: List[str],
    values: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    lengths: Optional[torch.Tensor] = None,
    offsets: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    # Below exposed to ensure torch.script-able
    length_per_key: Optional[List[int]] = None,
    offset_per_key: Optional[List[int]] = None,
    index_per_key: Optional[Dict[str, int]] = None,
    jt_dict: Optional[Dict[str, JaggedTensor]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
    """See the class docstring for the meaning of each argument."""
    self._keys: List[str] = keys
    self._values: torch.Tensor = values
    self._weights: Optional[torch.Tensor] = weights
    # Validate eagerly: lengths/offsets must be integer tensors (or empty).
    if offsets is not None:
        _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
    if lengths is not None:
        _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
    self._lengths: Optional[torch.Tensor] = lengths
    self._offsets: Optional[torch.Tensor] = offsets
    self._stride_per_key_per_rank: List[List[int]] = []
    self._stride_per_key: List[int] = []
    self._variable_stride_per_key: bool = False
    self._stride: int = -1
    if stride_per_key_per_rank is not None:
        # Variable-batch path: strides are given per key per rank and `stride`
        # must not also be supplied.
        if stride is not None:
            raise ValueError(
                "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
            )
        self._stride_per_key_per_rank = stride_per_key_per_rank
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        self._variable_stride_per_key = True
        if not stride_per_key_per_rank:
            self._stride = 0
        elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
            # All keys agree on one stride, so a scalar stride is still valid.
            self._stride = self.stride_per_key()[0]
    else:
        # Fixed-batch path: derive stride from lengths/offsets when not given.
        if torch.jit.is_tracing():
            stride = _maybe_compute_stride_kjt_scripted(
                keys, stride, lengths, offsets
            )[0]
        else:
            stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
        self._stride = stride
        self._stride_per_key_per_rank = [[stride]] * len(self._keys)
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
    # lazy fields (computed on demand and cached; see sync()/unsync())
    self._length_per_key: Optional[List[int]] = length_per_key
    self._offset_per_key: Optional[List[int]] = offset_per_key
    self._index_per_key: Optional[Dict[str, int]] = index_per_key
    self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
    self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
        inverse_indices
    )
    self._lengths_offset_per_key: List[int] = []
@staticmethod
def from_offsets_sync(
    keys: List[str],
    values: torch.Tensor,
    offsets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """
    Build a KeyedJaggedTensor from ``offsets`` and eagerly compute (sync) its
    per-key length/offset caches.

    Fix: restored the ``@staticmethod`` decorator -- the body never uses
    ``self``/``cls``, and without the decorator a call through an instance
    would misbind the instance into ``keys``.
    """
    kjt = KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        offsets=offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    )
    return kjt.sync()
@staticmethod
def from_lengths_sync(
    keys: List[str],
    values: torch.Tensor,
    lengths: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """
    Build a KeyedJaggedTensor from ``lengths`` and eagerly compute (sync) its
    per-key length/offset caches.

    Fix: restored the ``@staticmethod`` decorator -- the body never uses
    ``self``/``cls``, and without the decorator a call through an instance
    would misbind the instance into ``keys``.
    """
    kjt = KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        lengths=lengths,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    )
    return kjt.sync()
def concat(
    kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
    """Concatenate several KJTs along the key dimension into one KJT.

    All inputs must agree on weighted-ness and on variable-stride-ness; for
    fixed-stride inputs the strides must all be equal.

    NOTE(review): upstream presumably declares this as @staticmethod (the body
    never uses self/cls); the decorator appears lost in extraction -- confirm.
    """
    if len(kjt_list) == 0:
        raise ValueError("Can't concat empty KJT list")
    is_weighted: bool = kjt_list[0].weights_or_none() is not None
    has_length_per_key: bool = True
    length_per_key: List[int] = []
    keys: List[str] = []
    value_list: List[torch.Tensor] = []
    weight_list: List[torch.Tensor] = []
    length_list: List[torch.Tensor] = []
    stride_per_key_per_rank: List[List[int]] = []
    stride: Optional[int] = None
    variable_stride_per_key_list = [
        kjt.variable_stride_per_key() for kjt in kjt_list
    ]
    # Either every input is variable-stride or none is.
    assert all(variable_stride_per_key_list) or not any(
        variable_stride_per_key_list
    ), "variable stride per key must be consistent for all KJTs"
    variable_stride_per_key = all(variable_stride_per_key_list)
    for kjt in kjt_list:
        curr_is_weighted: bool = kjt.weights_or_none() is not None
        if is_weighted != curr_is_weighted:
            raise ValueError("Can't merge weighted KJT with unweighted KJT")
        # length_per_key can only be propagated if EVERY input has it cached.
        _length_per_key: Optional[List[int]] = None
        if kjt._length_per_key is None:
            has_length_per_key = False
        else:
            _length_per_key = kjt._length_per_key
        if has_length_per_key and _length_per_key is not None:
            length_per_key += _length_per_key
        keys += kjt.keys()
        value_list.append(kjt.values())
        if is_weighted:
            weight_list.append(kjt.weights())
        length_list.append(kjt.lengths())
        if variable_stride_per_key:
            stride_per_key_per_rank += kjt.stride_per_key_per_rank()
        elif stride is None:
            stride = kjt.stride()
        else:
            assert stride == kjt.stride(), "strides must be consistent for all KJTs"
    return KeyedJaggedTensor(
        keys=keys,
        values=torch.cat(value_list, dim=0),
        weights=torch.cat(weight_list, dim=0) if is_weighted else None,
        lengths=torch.cat(length_list, dim=0),
        stride=stride,
        stride_per_key_per_rank=(
            stride_per_key_per_rank if variable_stride_per_key else None
        ),
        length_per_key=length_per_key if has_length_per_key else None,
    )
@staticmethod
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
    """
    Construct a KeyedJaggedTensor with no keys and empty values/lengths
    (plus empty weights when ``is_weighted``) on the requested device/dtypes.

    Fix: restored the ``@staticmethod`` decorator -- the body never uses
    ``self``; without the decorator a call through an instance would misbind
    the instance into ``is_weighted``.
    """
    weights = (
        torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
    )
    return KeyedJaggedTensor(
        # torch.jit.annotate keeps the empty list typed for TorchScript.
        keys=torch.jit.annotate(List[str], []),
        values=torch.empty(0, dtype=values_dtype, device=device),
        weights=weights,
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        stride=0,
    )
@staticmethod
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """
    Construct an empty KeyedJaggedTensor mirroring ``kjt``'s device, dtypes,
    weighted-ness and stride configuration.

    Fix: restored the ``@staticmethod`` decorator -- the body never uses
    ``self``; without the decorator a call through an instance would misbind
    the instance into ``kjt``.
    """
    # Preserve whichever stride representation the source KJT uses.
    stride, stride_per_key_per_rank = (
        (None, kjt.stride_per_key_per_rank())
        if kjt.variable_stride_per_key()
        else (kjt.stride(), None)
    )
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
        weights=(
            None
            if kjt.weights_or_none() is None
            else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
        ),
        lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.
    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.
    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.
    Consider the visualization of the following KeyedJaggedTensor:
    #              0       1        2  <-- dim_1
    # "Feature0"   [V0,V1] None    [V2]
    # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    #    ^
    #   dim_0
    Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    Now if the input jt_dict = {
        # "Feature0"   [V0,V1] [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.
    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.

    NOTE(review): upstream presumably declares this as @staticmethod (the body
    never uses self/cls); the decorator appears lost in extraction -- confirm.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    stride_per_key: List[int] = []
    # Flatten each per-key JaggedTensor; its lengths count doubles as that
    # key's stride (implicit batch size).
    for jt in jt_dict.values():
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # Uniform per-key strides -> scalar stride; otherwise fall back to the
    # variable-stride representation (one single-rank list per key).
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly materialize the per-key length/offset caches; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached per-key lengths/offsets (inverse of sync); returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Device of the underlying values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Return the lengths tensor, computing it from offsets (via the
        ``_maybe_compute_lengths`` helper) when not already set, and cache it."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths tensor, or None if never set/computed."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Return the offsets tensor, computing it from lengths (via the
        ``_maybe_compute_offsets`` helper) when not already set, and cache it."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets tensor, or None if never set/computed."""
        return self._offsets
    def keys(self) -> List[str]:
        """List of feature keys, in storage order."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Flat values tensor backing all keys."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Per-value weights; the helper is expected to raise when weights are absent."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Per-value weights, or None when this KJT is unweighted."""
        return self._weights
    def stride(self) -> int:
        """The stride (per-key batch size) as stored on construction."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Stride (batch size) of each key, in key order."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Per-key, per-rank strides (outer list over keys)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """True when strides may differ between keys (variable-batch layout)."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices as (keys, tensor); the helper is expected to raise if unset."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices, or None when none were provided."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Lazily build and cache the key -> position-in-``self._keys`` mapping."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values belonging to each key, computed lazily and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key lengths, or None if never computed."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key's values (plus the final end offset),
        computed lazily; also refreshes the length_per_key cache, since the
        helper returns both."""
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or None if never computed."""
        return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
if not self._lengths_offset_per_key:
self._lengths_offset_per_key = _cumsum(self.stride_per_key())
return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT into consecutive KJTs holding ``segments[i]`` keys each.

        Three per-segment paths: a segment covering every key reuses all
        tensors and caches as-is; an empty segment fabricates empty tensors;
        otherwise values/weights/lengths are sliced for the covered keys.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # empty segment: build empty tensors with matching dtypes
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # general case: slice values/weights by offsets and lengths
                # by per-key length positions
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Reorder keys (and their values/lengths/weights) into ``indices`` order.

        Args:
            indices: permutation expressed as positions into ``self.keys()``.
            indices_tensor: optional pre-built tensor form of ``indices``;
                built here on ``self.device()`` when not supplied.
            include_inverse_indices: carry inverse indices over to the result.

        Returns:
            A new KeyedJaggedTensor with keys permuted accordingly.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # variable-stride path: permute lengths and values segment-wise
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # fixed-stride path: single fused fbgemm permute over a 2-D view
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor for ``key``, sliced out of this KJT's tensors."""
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # NOTE(review): defensive fallback — offset_per_key appears to carry a
        # trailing end offset, so index + 1 should always be in range for a
        # valid key; when it is not, an empty slice is produced. Confirm.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Convert to a {key: JaggedTensor} mapping via the cached helper;
        the result is memoized on ``self._jt_dict``."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Copy this KJT to ``device``.

        ``dtype``, when provided, is applied to the weights tensor only;
        values/lengths/offsets keep their dtypes. Cached per-key metadata
        (lists/dicts) is carried over unchanged.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Human-readable rendering, one jagged-tensor line per key."""
        # Precedence: empty keys, OR (no offsets AND no lengths) -> empty repr.
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """Return a copy whose backing tensors are in pinned (page-locked) memory.

        Note: the cached ``jt_dict`` is intentionally not carried over (its
        tensors would not be pinned); it is passed as None below.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
splits = [batch_size_per_split, length_per_split]
if self.variable_stride_per_key():
splits.append(key_splits)
if self.weights_or_none() is not None:
splits.append(length_per_split)
return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
def dist_init(
keys: List[str],
tensors: List[torch.Tensor],
variable_stride_per_key: bool,
num_workers: int,
recat: Optional[torch.Tensor],
stride_per_rank: Optional[List[int]],
stagger: int = 1,
) -> "KeyedJaggedTensor":
assert len(tensors) in [2, 3, 4]
lengths = tensors[0]
values = tensors[1]
stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
weights = (
tensors[-1]
if (variable_stride_per_key and len(tensors) == 4)
or (not variable_stride_per_key and len(tensors) == 3)
else None
)
if variable_stride_per_key:
assert stride_per_rank_per_key is not None
stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
num_workers, len(keys)
).T.tolist()
strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
stride_per_rank_per_key
).tolist()
cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
length_per_key = (
cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
)
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
lengths, _ = _permute_tensor_by_segments(
lengths,
stride_per_rank_per_key,
recat,
None,
)
values, weights = _permute_tensor_by_segments(
values,
length_per_key,
recat,
weights,
)
if not stride_per_key_per_rank:
stride_per_key_per_rank = [[0]] * len(keys)
if stagger > 1:
stride_per_key_per_rank_stagger: List[List[int]] = []
local_world_size = num_workers // stagger
for i in range(len(keys)):
stride_per_rank_stagger: List[int] = []
for j in range(local_world_size):
stride_per_rank_stagger.extend(
stride_per_key_per_rank[i][j::local_world_size]
)
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
stride_per_key_per_rank = stride_per_key_per_rank_stagger
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride_per_key_per_rank=stride_per_key_per_rank,
)
return kjt.sync()
else:
assert stride_per_rank is not None
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
stride = stride_per_rank[0]
if all(s == stride for s in stride_per_rank):
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
recat,
lengths.view(-1, stride),
values,
weights,
values.numel(),
)
lengths = lengths.view(-1)
else: # variable batch size per rank
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_1D_sparse_data(
recat,
lengths.view(-1),
values,
weights,
values.numel(),
)
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=sum(stride_per_rank),
)
return kjt.sync()
def create_embedding_bag_sharding(
    sharding_type: str,
    sharding_infos: List[EmbeddingShardingInfo],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
    permute_embeddings: bool = False,
    qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None,
) -> EmbeddingSharding[
    EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
]:
    """Instantiate the pooled-embedding sharding implementation for
    ``sharding_type``.

    ``permute_embeddings`` only applies to the (table-)column-wise variants,
    and data-parallel takes no qcomm codecs registry.

    Raises:
        ValueError: if ``sharding_type`` is not a supported ShardingType value.
    """
    if device is not None and device.type == "meta":
        replace_placement_with_meta_device(sharding_infos)
    if sharding_type == ShardingType.TABLE_WISE.value:
        return TwPooledEmbeddingSharding(
            sharding_infos,
            env,
            device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    if sharding_type == ShardingType.ROW_WISE.value:
        return RwPooledEmbeddingSharding(
            sharding_infos,
            env,
            device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    if sharding_type == ShardingType.DATA_PARALLEL.value:
        return DpPooledEmbeddingSharding(sharding_infos, env, device)
    if sharding_type == ShardingType.TABLE_ROW_WISE.value:
        return TwRwPooledEmbeddingSharding(
            sharding_infos,
            env,
            device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    if sharding_type == ShardingType.COLUMN_WISE.value:
        return CwPooledEmbeddingSharding(
            sharding_infos,
            env,
            device,
            permute_embeddings=permute_embeddings,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    if sharding_type == ShardingType.TABLE_COLUMN_WISE.value:
        return TwCwPooledEmbeddingSharding(
            sharding_infos,
            env,
            device,
            permute_embeddings=permute_embeddings,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    raise ValueError(f"Sharding type not supported {sharding_type}")
8,966 | import copy
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import (
Any,
cast,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from torch import nn, Tensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
Multistreamable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.sharding.cw_sharding import CwPooledEmbeddingSharding
from torchrec.distributed.sharding.dp_sharding import DpPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import RwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import TwPooledEmbeddingSharding
from torchrec.distributed.sharding.twcw_sharding import TwCwPooledEmbeddingSharding
from torchrec.distributed.sharding.twrw_sharding import TwRwPooledEmbeddingSharding
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
LazyAwaitable,
NullShardedModuleContext,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
append_prefix,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
PermutePooledEmbeddings,
)
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
)
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
class EmbeddingShardingInfo:
class EmbeddingComputeKernel(Enum):
class ParameterSharding:
def optimizer_type_to_emb_opt_type(
optimizer_class: Type[torch.optim.Optimizer],
) -> Optional[EmbOptimType]:
def merge_fused_params(
fused_params: Optional[Dict[str, Any]] = None,
param_fused_params: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
def add_params_from_parameter_sharding(
fused_params: Optional[Dict[str, Any]],
parameter_sharding: ParameterSharding,
) -> Dict[str, Any]:
def convert_to_fbgemm_types(fused_params: Dict[str, Any]) -> Dict[str, Any]:
class EmbeddingTableConfig(BaseEmbeddingConfig):
class EmbeddingBagCollectionInterface(abc.ABC, nn.Module):
def forward(
self,
features: KeyedJaggedTensor,
) -> KeyedTensor:
def embedding_bag_configs(
self,
) -> List[EmbeddingBagConfig]:
def is_weighted(self) -> bool:
def create_sharding_infos_by_sharding(
    module: EmbeddingBagCollectionInterface,
    table_name_to_parameter_sharding: Dict[str, ParameterSharding],
    prefix: str,
    fused_params: Optional[Dict[str, Any]],
    suffix: Optional[str] = "weight",
) -> Dict[str, List[EmbeddingShardingInfo]]:
    """Group EmbeddingShardingInfo objects by sharding type for ``module``.

    Args:
        module: the embedding-bag collection whose tables are being sharded.
        table_name_to_parameter_sharding: per-table sharding plan; every table
            in ``module`` must have an entry.
        prefix: fqn prefix for looking up table parameters (e.g.
            ``"embedding_bags."``).
        fused_params: base fused params merged into each table's params.
        suffix: parameter-name suffix (``None`` to use the bare table name).

    Returns:
        Mapping from sharding-type string to the list of sharding infos.

    Raises:
        ValueError: if a table uses an unsupported compute kernel.
    """
    if fused_params is None:
        fused_params = {}

    # Features used by more than one table get "@<table>"-suffixed embedding
    # names to stay unique.
    shared_feature: Dict[str, bool] = {}
    for embedding_config in module.embedding_bag_configs():
        if not embedding_config.feature_names:
            embedding_config.feature_names = [embedding_config.name]
        for feature_name in embedding_config.feature_names:
            if feature_name not in shared_feature:
                shared_feature[feature_name] = False
            else:
                shared_feature[feature_name] = True

    sharding_type_to_sharding_infos: Dict[str, List[EmbeddingShardingInfo]] = {}

    # state_dict returns parameter.Tensor, which loses parameter level attributes
    parameter_by_name = dict(module.named_parameters())
    # QuantEBC registers weights as buffers (since they are INT8), and so we need to grab it there
    state_dict = module.state_dict()

    for config in module.embedding_bag_configs():
        table_name = config.name
        assert (
            table_name in table_name_to_parameter_sharding
        ), f"{table_name} not in table_name_to_parameter_sharding"
        parameter_sharding = table_name_to_parameter_sharding[table_name]
        if parameter_sharding.compute_kernel not in [
            kernel.value for kernel in EmbeddingComputeKernel
        ]:
            raise ValueError(
                f"Compute kernel not supported {parameter_sharding.compute_kernel}"
            )
        embedding_names: List[str] = []
        for feature_name in config.feature_names:
            if shared_feature[feature_name]:
                embedding_names.append(feature_name + "@" + config.name)
            else:
                embedding_names.append(feature_name)

        param_name = prefix + table_name
        if suffix is not None:
            param_name = f"{param_name}.{suffix}"
        assert param_name in parameter_by_name or param_name in state_dict
        # BUGFIX: the previous ``parameter_by_name.get(param_name,
        # state_dict[param_name])`` evaluated its default eagerly, so a name
        # present only in named_parameters() would raise KeyError despite
        # satisfying the assert above. Look up lazily instead.
        param = (
            parameter_by_name[param_name]
            if param_name in parameter_by_name
            else state_dict[param_name]
        )

        optimizer_params = getattr(param, "_optimizer_kwargs", [{}])
        optimizer_classes = getattr(param, "_optimizer_classes", [None])
        assert (
            len(optimizer_classes) == 1 and len(optimizer_params) == 1
        ), f"Only support 1 optimizer, given {len(optimizer_classes)} optimizer classes \
        and {len(optimizer_params)} optimizer kwargs."
        optimizer_class = optimizer_classes[0]
        optimizer_params = optimizer_params[0]
        if optimizer_class:
            optimizer_params["optimizer"] = optimizer_type_to_emb_opt_type(
                optimizer_class
            )

        per_table_fused_params = merge_fused_params(fused_params, optimizer_params)
        per_table_fused_params = add_params_from_parameter_sharding(
            per_table_fused_params, parameter_sharding
        )
        per_table_fused_params = convert_to_fbgemm_types(per_table_fused_params)

        sharding_type_to_sharding_infos.setdefault(
            parameter_sharding.sharding_type, []
        ).append(
            EmbeddingShardingInfo(
                embedding_config=EmbeddingTableConfig(
                    num_embeddings=config.num_embeddings,
                    embedding_dim=config.embedding_dim,
                    name=config.name,
                    data_type=config.data_type,
                    feature_names=copy.deepcopy(config.feature_names),
                    pooling=config.pooling,
                    is_weighted=module.is_weighted(),
                    has_feature_processor=False,
                    embedding_names=embedding_names,
                    weight_init_max=config.weight_init_max,
                    weight_init_min=config.weight_init_min,
                    pruning_indices_remapping=config.pruning_indices_remapping,
                ),
                param_sharding=parameter_sharding,
                param=param,
                fused_params=per_table_fused_params,
            )
        )
    return sharding_type_to_sharding_infos
8,967 | import copy
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import (
Any,
cast,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
from torch import nn, Tensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
Multistreamable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.sharding.cw_sharding import CwPooledEmbeddingSharding
from torchrec.distributed.sharding.dp_sharding import DpPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import RwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import TwPooledEmbeddingSharding
from torchrec.distributed.sharding.twcw_sharding import TwCwPooledEmbeddingSharding
from torchrec.distributed.sharding.twrw_sharding import TwRwPooledEmbeddingSharding
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
LazyAwaitable,
NullShardedModuleContext,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
append_prefix,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
PermutePooledEmbeddings,
)
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
)
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# Best-effort load of fbgemm custom ops (Buck-style target paths suggest these
# apply to fb-internal builds). A missing library raises OSError, which is
# deliberately swallowed — presumably the ops are otherwise registered via the
# fbgemm_gpu package import.
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
except OSError:
    pass
# pyre-ignore
class KeyedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """
    KeyedTensor holds a concatenated list of dense tensors, each of which can be
    accessed by a key.
    The keyed dimension can be of variable length (length_per_key).
    Common use cases uses include storage of pooled embeddings of different dimensions.
    Implementation is torch.jit.script-able.
    Args:
        keys (List[str]): list of keys.
        length_per_key (List[int]): length of each key along key dimension.
        values (torch.Tensor): dense tensor, concatenated typically along key dimension.
        key_dim (int): key dimension, zero indexed - defaults to 1
            (typically B is 0-dimension).
    Example::
        # kt is KeyedTensor holding
        #                 0           1           2
        # "Embedding A"    [1,1]       [1,1]        [1,1]
        # "Embedding B"    [2,1,2]     [2,1,2]      [2,1,2]
        # "Embedding C"    [3,1,2,3]   [3,1,2,3]    [3,1,2,3]
        tensor_list = [
            torch.tensor([[1,1]] * 3),
            torch.tensor([[2,1,2]] * 3),
            torch.tensor([[3,1,2,3]] * 3),
        ]
        keys = ["Embedding A", "Embedding B", "Embedding C"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list)
        kt.values()
            # tensor(
            #     [
            #         [1, 1, 2, 1, 2, 3, 1, 2, 3],
            #         [1, 1, 2, 1, 2, 3, 1, 2, 3],
            #         [1, 1, 2, 1, 2, 3, 1, 2, 3],
            #     ]
            # )
        kt["Embedding B"]
            # tensor([[2, 1, 2], [2, 1, 2], [2, 1, 2]])
    """

    def __init__(
        self,
        keys: List[str],
        length_per_key: List[int],
        values: torch.Tensor,
        key_dim: int = 1,
        # Below exposed to ensure torch.script-able
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
    ) -> None:
        self._keys = keys
        self._length_per_key = length_per_key
        self._values = values
        self._key_dim = key_dim
        # lazily-computed caches (see offset_per_key / _key_indices)
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key

    def from_tensor_list(
        keys: List[str], tensors: List[torch.Tensor], key_dim: int = 1, cat_dim: int = 1
    ) -> "KeyedTensor":
        """Build a KeyedTensor by concatenating ``tensors`` along ``cat_dim``;
        per-key lengths are taken from each tensor's size along ``key_dim``."""
        length_per_key = [tensor.shape[key_dim] for tensor in tensors]
        return KeyedTensor(
            keys=keys,
            length_per_key=length_per_key,
            values=torch.cat(tensors, dim=cat_dim),
            key_dim=key_dim,
        )

    def keys(self) -> List[str]:
        """List of keys, in storage order."""
        return self._keys

    def values(self) -> torch.Tensor:
        """The concatenated dense values tensor."""
        return self._values

    def key_dim(self) -> int:
        """The dimension along which keys are laid out."""
        return self._key_dim

    def offset_per_key(self) -> List[int]:
        """Start offset of each key along the key dim, computed lazily and cached."""
        _offset_per_key = _maybe_compute_offset_per_key_kt(
            self._length_per_key,
            self._offset_per_key,
        )
        self._offset_per_key = _offset_per_key
        return _offset_per_key

    def length_per_key(self) -> List[int]:
        """Length of each key along the key dim."""
        return self._length_per_key

    def _key_indices(self) -> Dict[str, int]:
        """Lazily build and cache the key -> position mapping."""
        _index_per_key = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key

    def __getitem__(self, key: str) -> torch.Tensor:
        """Narrow the values tensor to ``key``'s span along the key dim."""
        index = self._key_indices()[key]
        start = self.offset_per_key()[index]
        length = self._length_per_key[index]
        return self._values.narrow(dim=self._key_dim, start=start, length=length)

    def to_dict(self) -> Dict[str, torch.Tensor]:
        """Split the values per key and return a {key: tensor} mapping."""
        indices = self._key_indices()
        lengths = self._length_per_key
        split_values = self._values.split(lengths, dim=self._key_dim)
        return {key: split_values[index] for (key, index) in indices.items()}

    def regroup(
        keyed_tensors: List["KeyedTensor"], groups: List[List[str]]
    ) -> List[torch.Tensor]:
        """Regroup keys across several KeyedTensors into one tensor per group."""
        return _regroup_keyed_tensors(keyed_tensors, groups)

    def regroup_as_dict(
        keyed_tensors: List["KeyedTensor"], groups: List[List[str]], keys: List[str]
    ) -> Dict[str, torch.Tensor]:
        """Like regroup(), but labels each grouped tensor with ``keys[i]``."""
        assert len(groups) == len(keys), "Groups and keys should have same length"
        embeddings_list = _regroup_keyed_tensors(keyed_tensors, groups)
        embeddings_dict: Dict[str, torch.Tensor] = {}
        for i, key in enumerate(keys):
            embeddings_dict[key] = embeddings_list[i]
        return embeddings_dict

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Mark the values tensor as in use on ``stream``."""
        self._values.record_stream(stream)

    def to(self, device: torch.device, non_blocking: bool = False) -> "KeyedTensor":
        """Copy to ``device``; metadata (keys, lengths, caches) carries over."""
        return KeyedTensor(
            keys=self._keys,
            length_per_key=self._length_per_key,
            values=self._values.to(device, non_blocking=non_blocking),
            key_dim=self._key_dim,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
        )

    def __str__(self) -> str:
        """Human-readable rendering, one line per key."""
        if len(self._keys) == 0:
            return "KeyedTensor()\n"
        return (
            "KeyedTensor({\n"
            + ",\n".join(
                [
                    '    "{}": '.format(key) + _keyed_values_string(self[key])
                    for key in self._keys
                ]
            )
            + "\n})\n"
        )
def construct_output_kt(
    embeddings: List[torch.Tensor],
    embedding_names: List[str],
    embedding_dims: List[int],
) -> KeyedTensor:
    """Assemble per-group embedding tensors into a single KeyedTensor.

    A lone tensor is used as-is; multiple tensors are concatenated along
    dim 1 (the key dimension).
    """
    cat_embeddings: torch.Tensor = (
        embeddings[0] if len(embeddings) == 1 else torch.cat(embeddings, dim=1)
    )
    return KeyedTensor(
        keys=embedding_names,
        length_per_key=embedding_dims,
        values=cat_embeddings,
        key_dim=1,
    )
8,968 | import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, TypeVar, Union
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch.distributed import _remote_device
from torch.distributed._shard.sharded_tensor import (
Shard,
ShardedTensorBase,
ShardedTensorMetadata,
ShardMetadata,
)
from torchrec.distributed.embedding_sharding import EmbeddingShardingInfo
from torchrec.distributed.embedding_types import (
GroupedEmbeddingConfig,
ShardedEmbeddingModule,
)
from torchrec.distributed.types import ParameterSharding, ShardingType
from torchrec.modules.embedding_configs import DataType
from torchrec.streamable import Multistreamable
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
def _append_table_shard(
d: Dict[str, List[Shard]], table_name: str, shard: Shard
) -> None:
if table_name not in d:
d[table_name] = []
d[table_name].append(shard) | null |
8,969 | import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, TypeVar, Union
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch.distributed import _remote_device
from torch.distributed._shard.sharded_tensor import (
Shard,
ShardedTensorBase,
ShardedTensorMetadata,
ShardMetadata,
)
from torchrec.distributed.embedding_sharding import EmbeddingShardingInfo
from torchrec.distributed.embedding_types import (
GroupedEmbeddingConfig,
ShardedEmbeddingModule,
)
from torchrec.distributed.types import ParameterSharding, ShardingType
from torchrec.modules.embedding_configs import DataType
from torchrec.streamable import Multistreamable
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
class WeightSpec:
    """Describes where one shard sits inside its unsharded weight tensor.

    NOTE(review): presumably decorated with @dataclass in the original
    module — no decorator is visible in this chunk; confirm against the
    full file before instantiating with keyword arguments.
    """

    # Fully qualified name of the unsharded weight this shard belongs to.
    fqn: str  # "ebc.embedding_bags.table_0.weight"
    # [row, col] offsets of this shard inside the unsharded tensor.
    shard_offsets: List[int]  # shard offsets
    # [row, col] extents of this shard.
    shard_sizes: List[int]  # shard sizes
    # Sharding scheme name, when known.
    sharding_type: Optional[str]  # e.g. ShardingType.ROW_WISE.value=="row_wise"
class EmbeddingShardingInfo:
    """Bundles everything needed to shard a single embedding table.

    NOTE(review): appears extracted from a @dataclass definition (a field
    with a default follows fields without) — the decorator is not visible
    in this chunk; confirm against the full file.
    """

    # Table-level configuration (name, dims, feature names, ...).
    embedding_config: EmbeddingTableConfig
    # How this table's parameter is partitioned across ranks.
    param_sharding: ParameterSharding
    # The (unsharded) parameter tensor itself.
    param: torch.Tensor
    # Optional kernel-fusion arguments forwarded to the compute kernel.
    fused_params: Optional[Dict[str, Any]] = None
class GroupedEmbeddingConfig:
    """Configuration shared by a group of embedding tables fused into one
    compute kernel.

    The helper methods flatten per-table metadata into per-feature lists:
    a table serving N features contributes N entries.
    """

    data_type: DataType
    pooling: PoolingType
    is_weighted: bool
    has_feature_processor: bool
    compute_kernel: EmbeddingComputeKernel
    embedding_tables: List[ShardedEmbeddingTable]
    fused_params: Optional[Dict[str, Any]] = None

    def feature_hash_sizes(self) -> List[int]:
        """Hash size (``num_embeddings``) of the serving table, per feature."""
        return [
            table.num_embeddings
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def num_features(self) -> int:
        """Total number of features served across all grouped tables."""
        return sum(table.num_features() for table in self.embedding_tables)

    def dim_sum(self) -> int:
        """Sum of output dims contributed by every feature in the group."""
        return sum(
            table.num_features() * table.local_cols
            for table in self.embedding_tables
        )

    def table_names(self) -> List[str]:
        """Names of the grouped tables, in order."""
        return [table.name for table in self.embedding_tables]

    def feature_names(self) -> List[str]:
        """All feature names, flattened across tables."""
        return [
            feature
            for table in self.embedding_tables
            for feature in table.feature_names
        ]

    def embedding_dims(self) -> List[int]:
        """Local embedding dim of the serving table, repeated per feature."""
        return [
            table.local_cols
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def embedding_names(self) -> List[str]:
        """All embedding output names, flattened across tables."""
        return [
            embedding
            for table in self.embedding_tables
            for embedding in table.embedding_names
        ]

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Local shard metadata of the serving table, repeated per feature."""
        return [
            table.local_metadata
            for table in self.embedding_tables
            for _ in table.feature_names
        ]
def sharded_tbes_weights_spec(
    sharded_model: torch.nn.Module,
) -> Dict[str, WeightSpec]:
    """Map every TBE-sharded weight fqn in ``sharded_model`` to a WeightSpec.

    Walks all modules and, for each sharded quant embedding(-bag) collection,
    records per TBE and per table shard where that shard — and its qscale /
    qbias side tensors — lives inside the original unsharded weight.
    """
    # OUTPUT:
    # Example:
    # {
    # tbes.0
    # table_0 in tbes.0
    # "ebc.tbes.0.0.table_0.weight": WeightSpec("ebc.embedding_bags.table_0.weight", [0, 0], [500, 192])
    # "ebc.tbes.0.0.table_0.weight_qscale":WeightSpec("ebc.embedding_bags.table_0.weight_qscale", [0, 0], [500, 2])
    # "ebc.tbes.0.0.table_0.weight_qbias":WeightSpec("ebc.embedding_bags.table_0.weight_qbias", [0, 0], [500, 2])
    # table_1 in tbes.1
    # "ebc.tbes.0.1.table_1.weight": WeightSpec("ebc.embedding_bags.table_1.weight", [0, 0], [500, 192])
    # "ebc.tbes.0.1.table_1.weight_qscale":WeightSpec("ebc.embedding_bags.table_1.weight_qscale", [0, 0], [500, 2])
    # "ebc.tbes.0.1.table_1.weight_qbias":WeightSpec("ebc.embedding_bags.table_1.weight_qbias", [0, 0], [500, 2])
    # tbes.1
    # table_0 in tbes.1
    # "ebc.tbes.1.0.table_0.weight": WeightSpec("ebc.embedding_bags.table_0.weight", [500, 0], [500, 192])
    # "ebc.tbes.1.0.table_0.weight_qscale":WeightSpec("ebc.embedding_bags.table_0.weight_qscale", [500, 0], [500, 2])
    # "ebc.tbes.1.0.table_0.weight_qbias":WeightSpec("ebc.embedding_bags.table_0.weight_qbias", [500, 0], [500, 2])
    # table_1 in tbes.1
    # "ebc.tbes.1.1.table_1.weight": WeightSpec("ebc.embedding_bags.table_1.weight", [500, 0], [500, 192])
    # "ebc.tbes.1.1.table_1.weight_qscale":WeightSpec("ebc.embedding_bags.table_1.weight_qscale", [500, 0], [500, 2])
    # "ebc.tbes.1.1.table_1.weight_qbias":WeightSpec("ebc.embedding_bags.table_1.weight_qbias", [500, 0], [500, 2])
    # }
    # In the format of ebc.tbes.i.j.table_k.weight, where i is the index of the TBE, j is the index of the embedding bag within TBE i, k is the index of the original table set in the ebc embedding_configs
    # e.g. ebc.tbes.1.1.table_1.weight, it represents second embedding bag within the second TBE. This part of weight is from a shard of table_1
    ret: Dict[str, WeightSpec] = {}
    for module_fqn, module in sharded_model.named_modules():
        type_name: str = type(module).__name__
        # Matched by class-name substring so the sharded types need not be imported here.
        is_sqebc: bool = "ShardedQuantEmbeddingBagCollection" in type_name
        is_sqec: bool = "ShardedQuantEmbeddingCollection" in type_name
        if is_sqebc or is_sqec:
            tbes_configs: Dict[
                IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig
            ] = module.tbes_configs()
            sharding_type_to_sharding_infos: Dict[str, List[EmbeddingShardingInfo]] = (
                module.sharding_type_to_sharding_infos()
            )
            # Invert the sharding-info mapping: table name -> sharding type.
            table_shardings: Dict[str, str] = {}
            for (
                sharding_type,
                sharding_infos,
            ) in sharding_type_to_sharding_infos.items():
                for info in sharding_infos:
                    table_shardings[info.embedding_config.name] = sharding_type
            for tbe_idx, (_tbe, config) in enumerate(tbes_configs.items()):
                tables = config.embedding_tables
                for table_idx, table in enumerate(tables):
                    table_name: str = table.name
                    # pyre-ignore
                    table_metadata: ShardMetadata = table.local_metadata
                    # TODO(ivankobzarev) Switch to use table_metadata.shard_sizes when it works correctly with int4 quantized modules
                    shard_sizes: List[int] = [table.local_rows, table.local_cols]
                    shard_offsets: List[int] = table_metadata.shard_offsets
                    s: str = "embedding_bags" if is_sqebc else "embeddings"
                    unsharded_fqn_weight: str = f"{module_fqn}.{s}.{table_name}.weight"
                    sharded_fqn_weight: str = (
                        f"{module_fqn}.tbes.{tbe_idx}.{table_idx}.{table_name}.weight"
                    )
                    sharding_type: str = table_shardings[table_name]
                    ret[sharded_fqn_weight] = WeightSpec(
                        fqn=unsharded_fqn_weight,
                        shard_offsets=shard_offsets,
                        shard_sizes=shard_sizes,
                        sharding_type=sharding_type,
                    )
                    # qscale/qbias shards share row offsets with the weight but
                    # never shift in the column dimension.
                    for qcomponent in ["qscale", "qbias"]:
                        qcomp_shard_offsets: List[int] = copy.deepcopy(shard_offsets)
                        # handling CW - no columns shift for qscale/qbias
                        qcomp_shard_offsets[1] = 0
                        qcomp_shard_sizes: List[int] = copy.deepcopy(shard_sizes)
                        # Assuming qscale and qbias are always torch.half (float16), represented as tensor of byte type => sizeof(float16) == 2 (bytes)
                        qcomp_shard_sizes[1] = 2
                        ret[f"{sharded_fqn_weight}_{qcomponent}"] = WeightSpec(
                            fqn=f"{unsharded_fqn_weight}_{qcomponent}",
                            shard_offsets=qcomp_shard_offsets,
                            shard_sizes=qcomp_shard_sizes,
                            sharding_type=sharding_type,
                        )
    return ret
8,970 | import copy
import logging
from collections import OrderedDict
from itertools import accumulate
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, Union
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import nn
from torchrec import optim as trec_optim
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import (
DataType,
ParameterSharding,
ShardedModule,
ShardingType,
)
from torchrec.modules.embedding_configs import data_type_to_sparse_type
from torchrec.types import CopyMixIn
The provided code snippet includes necessary dependencies for implementing the `append_prefix` function. Write a Python function `def append_prefix(prefix: str, name: str) -> str` to solve the following problem:
Appends provided prefix to provided name.
Here is the function:
def append_prefix(prefix: str, name: str) -> str:
    """Joins ``prefix`` and ``name`` with a dot, omitting the dot when either
    side is empty."""
    if not prefix or not name:
        return prefix + name
    return f"{prefix}.{name}"
8,971 | import copy
import logging
from collections import OrderedDict
from itertools import accumulate
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, Union
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import nn
from torchrec import optim as trec_optim
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import (
DataType,
ParameterSharding,
ShardedModule,
ShardingType,
)
from torchrec.modules.embedding_configs import data_type_to_sparse_type
from torchrec.types import CopyMixIn
The provided code snippet includes necessary dependencies for implementing the `filter_state_dict` function. Write a Python function `def filter_state_dict( state_dict: "OrderedDict[str, torch.Tensor]", name: str ) -> "OrderedDict[str, torch.Tensor]"` to solve the following problem:
Filters state dict for keys that start with provided name. Strips provided name from beginning of key in the resulting state dict. Args: state_dict (OrderedDict[str, torch.Tensor]): input state dict to filter. name (str): name to filter from state dict keys. Returns: OrderedDict[str, torch.Tensor]: filtered state dict.
Here is the function:
def filter_state_dict(
    state_dict: "OrderedDict[str, torch.Tensor]", name: str
) -> "OrderedDict[str, torch.Tensor]":
    """
    Filters state dict for keys that start with provided name.
    Strips provided name from beginning of key in the resulting state dict.

    Args:
        state_dict (OrderedDict[str, torch.Tensor]): input state dict to filter.
        name (str): name to filter from state dict keys.

    Returns:
        OrderedDict[str, torch.Tensor]: filtered state dict.
    """
    # Hoist the loop-invariant prefix ("name.") and its length out of the
    # loop instead of rebuilding the string for every key.
    prefix = name + "."
    start = len(prefix)
    return OrderedDict(
        (key[start:], value)
        for key, value in state_dict.items()
        if key.startswith(prefix)
    )
8,972 | import copy
import logging
from collections import OrderedDict
from itertools import accumulate
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, Union
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import nn
from torchrec import optim as trec_optim
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import (
DataType,
ParameterSharding,
ShardedModule,
ShardingType,
)
from torchrec.modules.embedding_configs import data_type_to_sparse_type
from torchrec.types import CopyMixIn
The provided code snippet includes necessary dependencies for implementing the `add_prefix_to_state_dict` function. Write a Python function `def add_prefix_to_state_dict(state_dict: Dict[str, Any], prefix: str) -> None` to solve the following problem:
Adds prefix to all keys in state dict, in place. Args: state_dict (Dict[str, Any]): input state dict to update. prefix (str): name to filter from state dict keys. Returns: None.
Here is the function:
def add_prefix_to_state_dict(state_dict: Dict[str, Any], prefix: str) -> None:
    """
    Adds prefix to all keys in state dict, in place.

    Args:
        state_dict (Dict[str, Any]): input state dict to update.
        prefix (str): prefix to prepend to every key.

    Returns:
        None.
    """
    # Rename in deterministic (sorted) order so re-inserted keys are stable.
    for old_key in sorted(state_dict):
        state_dict[prefix + old_key] = state_dict.pop(old_key)
    # NOTE(review): when a "_metadata" entry exists, the loop above has
    # already renamed it (for any non-empty prefix), so this branch is
    # effectively reachable only when prefix == "" — confirm intent
    # against callers.
    if "_metadata" in state_dict:
        metadata = state_dict["_metadata"]
        for meta_key in list(metadata.keys()):
            if len(meta_key) == 0:
                continue
            metadata[prefix + meta_key] = metadata.pop(meta_key)
8,973 | import copy
import logging
from collections import OrderedDict
from itertools import accumulate
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, Union
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import nn
from torchrec import optim as trec_optim
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import (
DataType,
ParameterSharding,
ShardedModule,
ShardingType,
)
from torchrec.modules.embedding_configs import data_type_to_sparse_type
from torchrec.types import CopyMixIn
# Best-effort load of the fbgemm permute_pooled_embedding operator libraries
# (fbcode buck target paths). OSError is swallowed: in OSS builds these ops
# are registered by importing fbgemm_gpu instead.
try:
    torch.ops.load_library(
        "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_cpu"
    )
    try:
        # Pick the GPU flavor matching the build (ROCm vs CUDA).
        if torch.version.hip:
            torch.ops.load_library(
                "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu_hip"
            )
        else:
            torch.ops.load_library(
                "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu_cuda"
            )
    except OSError:
        # For backward compatibility
        torch.ops.load_library(
            "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu"
        )
except OSError:
    pass
def _get_unsharded_module_names_helper(
    model: torch.nn.Module,
    path: str,
    unsharded_module_names: Set[str],
) -> bool:
    """Recursively collect prefixed names of children without sharded
    descendants into ``unsharded_module_names``; returns True when ``model``
    has at least one (possibly nested) sharded child."""
    sharded_child_names = set()
    for child_name, child in model.named_children():
        if isinstance(child, ShardedModule):
            sharded_child_names.add(child_name)
        elif _get_unsharded_module_names_helper(
            child,
            path + child_name + ".",
            unsharded_module_names,
        ):
            sharded_child_names.add(child_name)
    # Only record siblings when something at this level is sharded.
    if sharded_child_names:
        unsharded_module_names.update(
            path + child_name
            for child_name, _ in model.named_children()
            if child_name not in sharded_child_names
        )
    return bool(sharded_child_names)
The provided code snippet includes necessary dependencies for implementing the `get_unsharded_module_names` function. Write a Python function `def get_unsharded_module_names(model: torch.nn.Module) -> List[str]` to solve the following problem:
Retrieves names of top level modules that do not contain any sharded sub-modules. Args: model (torch.nn.Module): model to retrieve unsharded module names from. Returns: List[str]: list of names of modules that don't have sharded sub-modules.
Here is the function:
def get_unsharded_module_names(model: torch.nn.Module) -> List[str]:
    """
    Retrieves names of top level modules that do not contain any sharded
    sub-modules.

    Args:
        model (torch.nn.Module): model to inspect.

    Returns:
        List[str]: names of modules without sharded sub-modules.
    """
    collected: Set[str] = set()
    _get_unsharded_module_names_helper(model, "", collected)
    return list(collected)
8,974 | import copy
import logging
from collections import OrderedDict
from itertools import accumulate
from typing import Any, Dict, List, Optional, Set, Type, TypeVar, Union
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import nn
from torchrec import optim as trec_optim
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import (
DataType,
ParameterSharding,
ShardedModule,
ShardingType,
)
from torchrec.modules.embedding_configs import data_type_to_sparse_type
from torchrec.types import CopyMixIn
# Best-effort load of the fbgemm permute_pooled_embedding operator libraries
# (fbcode buck target paths). OSError is swallowed: in OSS builds these ops
# are registered by importing fbgemm_gpu instead.
try:
    torch.ops.load_library(
        "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_cpu"
    )
    try:
        # Pick the GPU flavor matching the build (ROCm vs CUDA).
        if torch.version.hip:
            torch.ops.load_library(
                "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu_hip"
            )
        else:
            torch.ops.load_library(
                "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu_cuda"
            )
    except OSError:
        # For backward compatibility
        torch.ops.load_library(
            "//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu"
        )
except OSError:
    pass
class sharded_model_copy:
    """
    Allows copying of DistributedModelParallel module to a target device.

    Implemented by temporarily monkey-patching ``__deepcopy__`` on
    Tensor/Parameter (and disabling deep-copy for process groups, work
    handles and CUDA streams) so that a plain ``copy.deepcopy`` inside the
    context relocates tensors to the target device.

    Example::

        # Copying model to CPU.

        m = DistributedModelParallel(m)
        with sharded_model_copy("cpu"):
            m_cpu = copy.deepcopy(m)
    """

    def __init__(self, device: Optional[Union[str, int, torch.device]]) -> None:
        # Target device; None means "detach/clone in place, no relocation".
        self.device = device

    def __enter__(self) -> None:
        # Save the current __deepcopy__ hooks so __exit__ can restore them.
        # pyre-ignore [16]
        self.t_copy_save_ = torch.Tensor.__deepcopy__
        # pyre-ignore [16]
        self.p_copy_save_ = torch.nn.Parameter.__deepcopy__

        device = self.device

        # Move the tensor only when it is not already on the target device;
        # otherwise just detach+clone.
        # pyre-ignore [2, 3, 53]
        def _tensor_copy(tensor, memo):
            if tensor.device != device:
                return tensor.detach().to(device)
            else:
                return tensor.detach().clone()

        # pyre-ignore [2, 3]
        def _no_copy(obj, memo):
            return obj

        _copy_or_not = _tensor_copy if self.device is not None else _no_copy

        # pyre-ignore [2, 3, 53]
        def _param_copy(param, memo):
            return torch.nn.Parameter(
                _copy_or_not(param, memo), requires_grad=param.requires_grad
            )

        torch.Tensor.__deepcopy__ = _copy_or_not
        torch.nn.Parameter.__deepcopy__ = _param_copy
        # Process groups, work handles and streams are shared by reference,
        # never duplicated.
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.ProcessGroupNCCL.__deepcopy__ = _no_copy
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.ProcessGroupGloo.__deepcopy__ = _no_copy
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.Work.__deepcopy__ = _no_copy
        # pyre-ignore [16]
        torch.cuda.streams.Stream.__deepcopy__ = _no_copy

    # pyre-ignore [2]
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Restore tensor/parameter hooks to the values saved in __enter__.
        # pyre-ignore [16]
        torch.Tensor.__deepcopy__ = self.t_copy_save_
        # pyre-ignore [16]
        torch.nn.Parameter.__deepcopy__ = self.p_copy_save_
        # NOTE(review): the hooks below are reset to None rather than to a
        # saved value — presumably these classes had no __deepcopy__ before
        # __enter__; confirm this never clobbers a hook installed elsewhere.
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.ProcessGroupNCCL.__deepcopy__ = None
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.ProcessGroupGloo.__deepcopy__ = None
        # pyre-fixme[16]: `Type` has no attribute `__deepcopy__`.
        torch._C._distributed_c10d.Work.__deepcopy__ = None
        # pyre-ignore [16]
        torch.cuda.streams.Stream.__deepcopy__ = None
class CopyMixIn:
    """Interface for modules that implement custom device-copy semantics.

    NOTE(review): this local stub shadows ``torchrec.types.CopyMixIn``
    imported at the top of the file — confirm which definition callers
    actually resolve.
    """

    # Return a copy of this module placed on `device`.
    def copy(self, device: torch.device) -> nn.Module: ...
def copy_to_device(
    module: nn.Module,
    current_device: torch.device,
    to_device: torch.device,
) -> nn.Module:
    """
    Deep-copies ``module``, relocating every parameter/buffer that lives on
    ``current_device`` to ``to_device``; tensors on other devices are copied
    but left in place.

    Modules implementing ``CopyMixIn`` supply their own ``copy`` logic and
    are delegated to (recursively, for children containing one).

    Args:
        module (nn.Module): module to copy.
        current_device (torch.device): device whose tensors should be moved.
        to_device (torch.device): destination device for those tensors.

    Returns:
        nn.Module: the relocated copy; ``module`` itself is not modified.
    """
    with sharded_model_copy(device=None):
        copy_module = copy.deepcopy(module)

    # Copy only weights with matching device.
    def _copy_if_device_match(tensor: torch.Tensor) -> torch.Tensor:
        if tensor.device == current_device:
            return tensor.to(to_device)
        return tensor

    # if this is a sharded module, customize the copy
    if isinstance(copy_module, CopyMixIn):
        return copy_module.copy(to_device)
    copied_param = {
        name: torch.nn.Parameter(
            _copy_if_device_match(param.data), requires_grad=param.requires_grad
        )
        for name, param in copy_module.named_parameters(recurse=False)
    }
    copied_buffer = {
        name: _copy_if_device_match(buffer)
        for name, buffer in copy_module.named_buffers(recurse=False)
    }
    for name, param in copied_param.items():
        copy_module.register_parameter(name, param)
    for name, buffer in copied_buffer.items():
        copy_module.register_buffer(name, buffer)
    for child_name, child in copy_module.named_children():
        # Fix: pass a generator to any() so the submodule scan short-circuits
        # instead of materializing a throwaway list first (ruff C419/PERF101).
        if not any(isinstance(submodule, CopyMixIn) for submodule in child.modules()):
            child_copy = child._apply(_copy_if_device_match)
        else:
            child_copy = copy_to_device(child, current_device, to_device)
        copy_module.register_module(child_name, child_copy)
    return copy_module
8,975 | import itertools
import logging
from typing import Callable, Dict, List, Optional
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.profiler import record_function
from torchrec.distributed.comm_ops import (
all_gather_base_pooled,
alltoall_pooled,
alltoall_sequence,
reduce_scatter_base_pooled,
reduce_scatter_v_per_feature_pooled,
reduce_scatter_v_pooled,
variable_batch_alltoall_pooled,
)
from torchrec.distributed.embedding_types import KJTList
from torchrec.distributed.types import Awaitable, QuantizedCommCodecs
from torchrec.fx.utils import fx_marker
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
    """Fallback stub used when ``torch._dynamo`` is unavailable: reports that
    no compilation is ever in progress."""
    return False
8,976 | import itertools
import logging
from typing import Callable, Dict, List, Optional
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.profiler import record_function
from torchrec.distributed.comm_ops import (
all_gather_base_pooled,
alltoall_pooled,
alltoall_sequence,
reduce_scatter_base_pooled,
reduce_scatter_v_per_feature_pooled,
reduce_scatter_v_pooled,
variable_batch_alltoall_pooled,
)
from torchrec.distributed.embedding_types import KJTList
from torchrec.distributed.types import Awaitable, QuantizedCommCodecs
from torchrec.fx.utils import fx_marker
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# Best-effort registration of fbgemm sparse / merge-pooled-embedding operator
# libraries (fbcode buck target paths). A failed load is silently ignored —
# OSS builds register these ops by importing fbgemm_gpu instead.
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings")
    torch.ops.load_library(
        "//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings_cpu"
    )
except OSError:
    pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
The provided code snippet includes necessary dependencies for implementing the `_get_recat` function. Write a Python function `def _get_recat( local_split: int, num_splits: int, stagger: int = 1, device: Optional[torch.device] = None, batch_size_per_rank: Optional[List[int]] = None, ) -> Optional[torch.Tensor]` to solve the following problem:
Calculates relevant recat indices required to reorder AlltoAll collective. Args: local_split (int): number of features in local split. num_splits (int): number of splits (typically WORLD_SIZE). stagger (int): secondary reordering, (typically 1, but `WORLD_SIZE/LOCAL_WORLD_SIZE` for TWRW). device (Optional[torch.device]): device on which buffer will be allocated. batch_size_per_rank (Optional[List[int]]): batch size per rank, needed for variable batch size. Returns: Optional[torch.Tensor]: recat tensor, None if local rank is empty. Example:: _recat(2, 4, 1) # [0, 2, 4, 6, 1, 3, 5, 7] _recat(2, 4, 2) # [0, 4, 2, 6, 1, 5, 3, 7] _recat(0, 4, 2) # None
Here is the function:
def _get_recat(
local_split: int,
num_splits: int,
stagger: int = 1,
device: Optional[torch.device] = None,
batch_size_per_rank: Optional[List[int]] = None,
) -> Optional[torch.Tensor]:
"""
Calculates relevant recat indices required to reorder AlltoAll collective.
Args:
local_split (int): number of features in local split.
num_splits (int): number of splits (typically WORLD_SIZE).
stagger (int): secondary reordering, (typically 1, but
`WORLD_SIZE/LOCAL_WORLD_SIZE` for TWRW).
device (Optional[torch.device]): device on which buffer will be allocated.
batch_size_per_rank (Optional[List[int]]): batch size per rank, needed for
variable batch size.
Returns:
Optional[torch.Tensor]: recat tensor, None if local rank is empty.
Example::
_recat(2, 4, 1)
# [0, 2, 4, 6, 1, 3, 5, 7]
_recat(2, 4, 2)
# [0, 4, 2, 6, 1, 5, 3, 7]
_recat(0, 4, 2)
# None
"""
with record_function("## all2all_data:recat_permute_gen ##"):
if local_split == 0:
return None
recat: List[int] = []
feature_order: List[int] = [
x + num_splits // stagger * y
for x in range(num_splits // stagger)
for y in range(stagger)
]
for i in range(local_split):
for j in feature_order: # range(num_splits):
recat.append(i + j * local_split)
# variable batch size
if batch_size_per_rank is not None and any(
bs != batch_size_per_rank[0] for bs in batch_size_per_rank
):
batch_size_per_feature = list(
itertools.chain.from_iterable(
itertools.repeat(x, local_split) for x in batch_size_per_rank
)
)
permuted_batch_size_per_feature = [batch_size_per_feature[r] for r in recat]
input_offset = [0] + list(itertools.accumulate(batch_size_per_feature))
output_offset = [0] + list(
itertools.accumulate(permuted_batch_size_per_feature)
)
recat_tensor = torch.tensor(
recat,
device=device,
dtype=torch.int32,
)
input_offset_tensor = torch.tensor(
input_offset,
device=device,
dtype=torch.int32,
)
output_offset_tensor = torch.tensor(
output_offset,
device=device,
dtype=torch.int32,
)
recat = torch.ops.fbgemm.expand_into_jagged_permute(
recat_tensor,
input_offset_tensor,
output_offset_tensor,
output_offset[-1],
)
return recat
else:
return torch.tensor(recat, device=device, dtype=torch.int32) | Calculates relevant recat indices required to reorder AlltoAll collective. Args: local_split (int): number of features in local split. num_splits (int): number of splits (typically WORLD_SIZE). stagger (int): secondary reordering, (typically 1, but `WORLD_SIZE/LOCAL_WORLD_SIZE` for TWRW). device (Optional[torch.device]): device on which buffer will be allocated. batch_size_per_rank (Optional[List[int]]): batch size per rank, needed for variable batch size. Returns: Optional[torch.Tensor]: recat tensor, None if local rank is empty. Example:: _recat(2, 4, 1) # [0, 2, 4, 6, 1, 3, 5, 7] _recat(2, 4, 2) # [0, 4, 2, 6, 1, 5, 3, 7] _recat(0, 4, 2) # None |
8,977 | import abc
import operator
from dataclasses import dataclass
from enum import Enum, unique
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Type,
TypeVar,
Union,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
)
from torch.autograd.profiler import record_function
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import DataType, ModuleNoCopyMixin
import torch
import torch.distributed as dist
import torch.fx
from torch import nn
from torch.distributed._shard.sharded_tensor import ( # noqa
Shard,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from torch.distributed._shard.sharding_spec import ( # noqa
EnumerableShardingSpec,
ShardingSpec,
ShardMetadata,
)
from torchrec.streamable import Multistreamable
The provided code snippet includes necessary dependencies for implementing the `_tabulate` function. Write a Python function `def _tabulate( table: List[List[Union[str, int]]], headers: Optional[List[str]] = None ) -> str` to solve the following problem:
Format a table as a string. Parameters: table (list of lists or list of tuples): The data to be formatted as a table. headers (list of strings, optional): The column headers for the table. If not provided, the first row of the table will be used as the headers. Returns: str: A string representation of the table.
Here is the function:
def _tabulate(
table: List[List[Union[str, int]]], headers: Optional[List[str]] = None
) -> str:
"""
Format a table as a string.
Parameters:
table (list of lists or list of tuples): The data to be formatted as a table.
headers (list of strings, optional): The column headers for the table. If not provided, the first row of the table will be used as the headers.
Returns:
str: A string representation of the table.
"""
if headers is None:
headers = table[0]
table = table[1:]
headers = cast(List[str], headers)
rows = []
# Determine the maximum width of each column
col_widths = [max([len(str(item)) for item in column]) for column in zip(*table)]
col_widths = [max(i, len(j)) for i, j in zip(col_widths, headers)]
# Format each row of the table
for row in table:
row_str = " | ".join(
[str(item).ljust(width) for item, width in zip(row, col_widths)]
)
rows.append(row_str)
# Add the header row and the separator line
rows.insert(
0,
" | ".join(
[header.center(width) for header, width in zip(headers, col_widths)]
),
)
rows.insert(1, " | ".join(["-" * width for width in col_widths]))
return "\n".join(rows) | Format a table as a string. Parameters: table (list of lists or list of tuples): The data to be formatted as a table. headers (list of strings, optional): The column headers for the table. If not provided, the first row of the table will be used as the headers. Returns: str: A string representation of the table. |
8,978 | import abc
import operator
from dataclasses import dataclass
from enum import Enum, unique
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Type,
TypeVar,
Union,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
)
from torch.autograd.profiler import record_function
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import DataType, ModuleNoCopyMixin
import torch
import torch.distributed as dist
import torch.fx
from torch import nn
from torch.distributed._shard.sharded_tensor import ( # noqa
Shard,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from torch.distributed._shard.sharding_spec import ( # noqa
EnumerableShardingSpec,
ShardingSpec,
ShardMetadata,
)
from torchrec.streamable import Multistreamable
class LazyAwaitable(Awaitable[W], metaclass=_LazyAwaitableMeta):
    """
    The LazyAwaitable type which exposes a `wait()` API, concrete types
    can control how to initialize and how the `wait()` behavior should
    be in order to achieve specific async operation.

    This base LazyAwaitable type is a "lazy" async type, which means it will
    delay `wait()` as late as possible, see details in `__torch_function__`
    below. This could help the model automatically enable computation and
    communication overlap, model author doesn't need to manually call
    `wait()` if the result is used by a pytorch function, or by other python
    operations (NOTE: need to implement corresponding magic methods
    like __getattr__ below)

    Some caveats:
    * This works with Pytorch functions, but not any generic method, if
      you would like to do arbitrary python operations, you need to
      implement the corresponding magic methods
    * In the case that one function has two or more arguments that are LazyAwaitable,
      the lazy wait mechanism can't ensure perfect computation/communication
      overlap (i.e. quickly waited the first one but long wait on the second)
    """

    def __init__(
        self,
    ) -> None:
        super().__init__()
        # _result is used to cache the results after the wait() is called.
        self._result: Optional[W] = None

    # NOTE(review): upstream this is a @staticmethod (it takes `obj`, not
    # `self`); the decorator is missing from this extraction — confirm
    # against the full file.
    # pyre-ignore [2, 3]
    def _wait_async(obj: Any) -> Any:
        """
        This method is used internally to automatically wait when necessary
        and cache the results of the `LazyAwaitable.wait()` call
        """
        if isinstance(obj, LazyAwaitable):
            if obj._result is None:
                obj._result = obj.wait()
            return obj._result
        else:
            return obj

    # pyre-ignore [2, 3]
    def __torch_function__(self, func, types, args=(), kwargs=None):
        """
        The LazyAwaitable type has a `__torch_function__` implementation.
        This means when this type is seen as an argument to a PyTorch
        function in a position where it expects a W, the PyTorch's
        dispatcher will call into this function for special handling

        Our `__torch_function__` implementation goes through all of the
        args and kwargs and checks if any of them are `LazyAwaitable`.
        If it is, it will call `wait()` on it and replace the LazyAwaitable
        type object with the result of wait. In this way, async values
        are waited on when the concrete value is first needed and without
        the user having to write an explicit `wait()` call.
        """
        kwargs = kwargs or {}

        # wait() on all LazyAwaitable args/kwargs and replace
        # them with the resulting value.
        new_args = torch.fx.node.map_aggregate(args, LazyAwaitable._wait_async)
        new_kwargs = torch.fx.node.map_aggregate(kwargs, LazyAwaitable._wait_async)

        return func(*new_args, **new_kwargs)

    # pyre-ignore [2, 3]
    def __getattr__(self, name):
        """
        Overrides __getattr__ to allow LazyAwaitable to first wait and then call getattr
        on the wait results.
        """
        if name == "_result":
            raise RuntimeError(
                f"LazyAwaitable type {type(self)} has not been initialized properly, "
                f"did you forget to call 'super()'?"
            )

        res = LazyAwaitable._wait_async(self)
        return getattr(res, name)

    # NOTE(review): the fragment below references `as_magic`, which is not
    # defined anywhere in this chunk; upstream it is bound inside a
    # module-level loop over torch.fx magic methods that calls scope(...)
    # to install `impl` as each `__<op>__` on LazyAwaitable. This extraction
    # appears truncated; the code is left byte-identical.
    def scope(method):
        # Forwarder that waits on LazyAwaitable operand(s) before applying
        # the unary/binary operator named by `method`.
        def impl(*args, **kwargs):
            lhs = args[0]
            op_fn = getattr(operator, method)
            if len(args) == 1:
                return op_fn(LazyAwaitable._wait_async(lhs))
            elif len(args) == 2:
                rhs = args[1]
                return op_fn(
                    LazyAwaitable._wait_async(lhs), LazyAwaitable._wait_async(rhs)
                )
            else:
                raise RuntimeError(f"magic method {as_magic} not supported!")

        impl.__name__ = as_magic
        setattr(LazyAwaitable, as_magic, impl)
8,979 | import abc
import operator
from dataclasses import dataclass
from enum import Enum, unique
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Type,
TypeVar,
Union,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
)
from torch.autograd.profiler import record_function
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import DataType, ModuleNoCopyMixin
import torch
import torch.distributed as dist
import torch.fx
from torch import nn
from torch.distributed._shard.sharded_tensor import ( # noqa
Shard,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from torch.distributed._shard.sharding_spec import ( # noqa
EnumerableShardingSpec,
ShardingSpec,
ShardMetadata,
)
from torchrec.streamable import Multistreamable
class LazyAwaitable(Awaitable[W], metaclass=_LazyAwaitableMeta):
    """
    The LazyAwaitable type which exposes a `wait()` API, concrete types
    can control how to initialize and how the `wait()` behavior should
    be in order to achieve specific async operation.
    This base LazyAwaitable type is a "lazy" async type, which means it will
    delay `wait()` as late as possible, see details in `__torch_function__`
    below. This could help the model automatically enable computation and
    communication overlap, model author doesn't need to manually call
    `wait()` if the results is used by a pytorch function, or by other python
    operations (NOTE: need to implement corresponding magic methods
    like __getattr__ below)
    Some caveats:
    * This works with Pytorch functions, but not any generic method, if
    you would like to do arbitary python operations, you need to
    implement the corresponding magic methods
    * In the case that one function have two or more arguments are LazyAwaitable,
    the lazy wait mechanism can't ensure perfect computation/communication
    overlap (i.e. quickly waited the first one but long wait on the second)
    """
    def __init__(
        self,
    ) -> None:
        super().__init__()
        # _result is used to cache the results after the wait() is called.
        self._result: Optional[W] = None
    # pyre-ignore [2, 3]
    # NOTE(review): invoked unbound as `LazyAwaitable._wait_async(obj)`;
    # presumably a @staticmethod in the original source (decorators are not
    # visible in this extract) -- confirm.
    def _wait_async(obj: Any) -> Any:
        """
        This method is used internally to automatically wait when necessary
        and cache the results of the `LazyAwaitable.wait()` call
        """
        if isinstance(obj, LazyAwaitable):
            # First access blocks on wait(); later accesses hit the cache.
            if obj._result is None:
                obj._result = obj.wait()
            return obj._result
        else:
            # Non-awaitable values pass through unchanged.
            return obj
    # pyre-ignore [2, 3]
    def __torch_function__(self, func, types, args=(), kwargs=None):
        """
        The LazyAwaitable type has a `__torch_function__` implementation.
        This means when this type is seens as an argument to a PyTorch
        function in a position where it expects a W, the PyTorch's
        dispatcher will call into this function for special handling
        Our `__torch_function__` implementation goes through all of the
        args and kwargs and checks if any of them are `LazyAwaitable`.
        If it is, it will call `wait()` on it and replace the LazyAwaitable
        type object with the result of wait. In this way, async values
        are waited on when the concrete value is first needed and without
        the user having to write an explicit `wait()` call.
        """
        kwargs = kwargs or {}
        # wait() on all LazyAwaitable args/kwargs and replace
        # them with the resulting value.
        new_args = torch.fx.node.map_aggregate(args, LazyAwaitable._wait_async)
        new_kwargs = torch.fx.node.map_aggregate(kwargs, LazyAwaitable._wait_async)
        return func(*new_args, **new_kwargs)
    # pyre-ignore [2, 3]
    def __getattr__(self, name):
        """
        Overrides __getattr__ to allow LazyAwaitable to first wait and then call getattr
        on the wait results.
        """
        # `_result` reaching __getattr__ means normal lookup failed, i.e. the
        # subclass skipped LazyAwaitable.__init__; raise rather than recurse
        # forever through _wait_async.
        if name == "_result":
            raise RuntimeError(
                f"LazyAwaitable type {type(self)} has not been initialized properly, "
                f"did you forget to call 'super()'?"
            )
        res = LazyAwaitable._wait_async(self)
        return getattr(res, name)
    def scope(method):
        """Install a reflected (swapped-operand) operator wrapper on LazyAwaitable.

        NOTE(review): `as_magic` is a free variable, presumably bound by an
        enclosing loop over reflected magic-method names (e.g. __radd__) not
        visible in this extract -- confirm against the original file. Operands
        are applied in (rhs, self) order, matching reflected semantics.
        """
        # pyre-ignore [2, 3, 53]
        def impl(self, rhs):
            op_fn = getattr(operator, method)
            # Reflected form: `rhs` is the left operand of the underlying op;
            # both sides are waited on before the operator runs.
            return op_fn(
                LazyAwaitable._wait_async(rhs), LazyAwaitable._wait_async(self)
            )
        impl.__name__ = as_magic
        impl.__qualname__ = as_magic
        setattr(LazyAwaitable, as_magic, impl)
8,980 | import copy
from collections import defaultdict, OrderedDict
from dataclasses import dataclass
from typing import Any, DefaultDict, Dict, Iterator, List, Optional, Type
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed._shard.sharded_tensor import Shard
from torchrec.distributed.comm import get_local_rank
from torchrec.distributed.embedding import EmbeddingCollectionContext
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
GroupedEmbeddingConfig,
KJTList,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingDist,
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import (
BaseRwEmbeddingSharding,
RwSparseFeaturesDist,
)
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.types import (
Awaitable,
LazyAwaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardedModule,
ShardedTensor,
ShardingEnv,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.mc_modules import (
apply_mc_method_to_jt_dict,
ManagedCollisionCollection,
)
from torchrec.modules.utils import construct_jagged_tensors
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
class EmbeddingSharding(abc.ABC, Generic[C, F, T, W], FeatureShardingMixIn):
    """
    Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
    table_wise.

    Subclasses provide factories for the three pipeline stages (input dist,
    lookup, output dist) plus metadata about the sharded tables.
    NOTE(review): decorators (e.g. @abc.abstractmethod / @property) are not
    visible in this extract; the `pass`-bodied methods below are presumably
    abstract hooks -- confirm against the original file.
    """
    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        # Optional registry of quantized comm codecs, forwarded to the dist
        # modules built from this sharding.
        self._qcomm_codecs_registry = qcomm_codecs_registry
    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # Accessor for the registry captured at construction.
        return self._qcomm_codecs_registry
    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[F]:
        # Factory for the module that redistributes sparse input features (F).
        pass
    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[C, T, W]:
        # Factory for the module that redistributes looked-up embeddings.
        pass
    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[F, T]:
        # Factory for the local embedding lookup module.
        pass
    def embedding_dims(self) -> List[int]:
        # Embedding dimension per table, in this sharding's order.
        pass
    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        # Shard metadata per embedding; None entries where not applicable.
        pass
    def embedding_names(self) -> List[str]:
        pass
    def embedding_names_per_rank(self) -> List[List[str]]:
        pass
    def embedding_tables(self) -> List[ShardedEmbeddingTable]:
        # Not every sharding exposes its table list; optional override.
        raise NotImplementedError
    def uncombined_embedding_dims(self) -> List[int]:
        # Default: identical to the (possibly combined) dims.
        return self.embedding_dims()
    def uncombined_embedding_names(self) -> List[str]:
        # Default: identical to the (possibly combined) names.
        return self.embedding_names()
class EmbeddingShardingInfo:
    """
    Bundle of everything needed to shard one embedding table.

    NOTE(review): the annotation-only body suggests a @dataclass decorator
    stripped by extraction -- confirm against the original file.
    """
    # Table configuration (per EmbeddingTableConfig).
    embedding_config: EmbeddingTableConfig
    # How this table is partitioned/placed across ranks.
    param_sharding: ParameterSharding
    # The parameter tensor backing the table.
    param: torch.Tensor
    # Optional kernel/fusion arguments forwarded to the lookup backend.
    fused_params: Optional[Dict[str, Any]] = None
class RwSequenceEmbeddingSharding(
    BaseRwEmbeddingSharding[
        SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """
    Shards sequence (unpooled) row-wise, i.e.. a given embedding table is evenly
    distributed by rows and table slices are placed on all ranks.
    """
    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        # Row-wise input dist over this sharding's features; feature hash
        # sizes drive how ids are routed to ranks.
        num_features = self._get_num_features()
        feature_hash_sizes = self._get_feature_hash_sizes()
        return RwSparseFeaturesDist(
            # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
            # `Optional[ProcessGroup]`.
            pg=self._pg,
            num_features=num_features,
            feature_hash_sizes=feature_hash_sizes,
            device=device if device is not None else self._device,
            # Sequence embeddings keep per-row outputs (no pooling).
            is_sequence=True,
            has_feature_processor=self._has_feature_processor,
            need_pos=False,
        )
    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        # NOTE(review): `fused_params` and `feature_processor` are accepted but
        # not forwarded here -- confirm this is intentional for this sharding.
        return GroupedEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=device if device is not None else self._device,
        )
    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[SequenceShardingContext, torch.Tensor, torch.Tensor]:
        # Output dist returning per-row embeddings to their source ranks.
        return RwSequenceEmbeddingDist(
            # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
            # `Optional[ProcessGroup]`.
            self._pg,
            self._get_num_features(),
            device if device is not None else self._device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class SequenceShardingContext(EmbeddingShardingContext):
    """
    Stores KJTAllToAll context and reuses it in SequenceEmbeddingsAllToAll.
    SequenceEmbeddingsAllToAll has the same comm pattern as KJTAllToAll.
    Attributes:
        features_before_input_dist (Optional[KeyedJaggedTensor]): stores the original
            KJT before input dist.
        input_splits(List[int]): stores the input splits of KJT AlltoAll.
        output_splits (List[int]): stores the output splits of KJT AlltoAll.
        unbucketize_permute_tensor (Optional[torch.Tensor]): stores the permute order of
            KJT bucketize (for row-wise sharding only).
        lengths_after_input_dist (Optional[torch.Tensor]): stores the KJT length after
            input dist.
    """
    # Torch Dynamo does not support default_factory=list:
    # https://github.com/pytorch/pytorch/issues/120108
    # TODO(ivankobzarev): Make this a dataclass once supported
    def __init__(
        self,
        # Fields of EmbeddingShardingContext
        batch_size_per_rank: Optional[List[int]] = None,
        batch_size_per_rank_per_feature: Optional[List[List[int]]] = None,
        batch_size_per_feature_pre_a2a: Optional[List[int]] = None,
        variable_batch_per_feature: bool = False,
        # Fields of SequenceShardingContext
        features_before_input_dist: Optional[KeyedJaggedTensor] = None,
        input_splits: Optional[List[int]] = None,
        output_splits: Optional[List[int]] = None,
        sparse_features_recat: Optional[torch.Tensor] = None,
        unbucketize_permute_tensor: Optional[torch.Tensor] = None,
        lengths_after_input_dist: Optional[torch.Tensor] = None,
    ) -> None:
        # Base-class fields are forwarded positionally.
        super().__init__(
            batch_size_per_rank,
            batch_size_per_rank_per_feature,
            batch_size_per_feature_pre_a2a,
            variable_batch_per_feature,
        )
        self.features_before_input_dist: Optional[KeyedJaggedTensor] = (
            features_before_input_dist
        )
        # None inputs normalize to empty lists (Dynamo-friendly defaults).
        self.input_splits: List[int] = input_splits if input_splits is not None else []
        self.output_splits: List[int] = (
            output_splits if output_splits is not None else []
        )
        self.sparse_features_recat: Optional[torch.Tensor] = sparse_features_recat
        self.unbucketize_permute_tensor: Optional[torch.Tensor] = (
            unbucketize_permute_tensor
        )
        self.lengths_after_input_dist: Optional[torch.Tensor] = lengths_after_input_dist
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Mark the optional cached tensors as in-use on `stream` so their
        # memory is not reclaimed early (see torch.Tensor.record_stream).
        if self.features_before_input_dist is not None:
            self.features_before_input_dist.record_stream(stream)
        if self.sparse_features_recat is not None:
            self.sparse_features_recat.record_stream(stream)
        if self.unbucketize_permute_tensor is not None:
            self.unbucketize_permute_tensor.record_stream(stream)
        if self.lengths_after_input_dist is not None:
            self.lengths_after_input_dist.record_stream(stream)
class ShardingType(Enum):
    """
    Well-known sharding types, used by inter-module optimizations.

    Member values are plain strings; use `.value` where a serialized form
    is required.
    """
    # Replicated on all ranks
    DATA_PARALLEL = "data_parallel"
    # Placed on a single rank
    TABLE_WISE = "table_wise"
    # Placed on multiple ranks as different sharded tables
    COLUMN_WISE = "column_wise"
    # Range-split on the first dimension across all ranks
    ROW_WISE = "row_wise"
    # Row-wise on the same node and table-wise across nodes
    # Useful when having multiple ranks per node
    # and comms within a single node are more efficient than across nodes.
    TABLE_ROW_WISE = "table_row_wise"
    # Column-wise on the same node and table-wise across nodes
    TABLE_COLUMN_WISE = "table_column_wise"
class ShardingEnv:
    """
    Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
    enables `DistributedModelParallel` to be used during inference.

    A ShardingEnv is just a (world_size, rank) pair plus an optional process
    group; `process_group` is None in process-group-free setups.
    """

    def __init__(
        self,
        world_size: int,
        rank: int,
        pg: Optional[dist.ProcessGroup] = None,
    ) -> None:
        self.rank = rank
        self.world_size = world_size
        self.process_group: Optional[dist.ProcessGroup] = pg

    def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
        """
        Creates ProcessGroup-based sharding environment.
        NOTE:
            Typically used during training.
        """
        size = dist.get_world_size(pg)
        rank = dist.get_rank(pg)
        return cls(size, rank, pg)

    def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
        """
        Creates a local host-based sharding environment.
        NOTE:
            Typically used during single host inference.
        """
        # No process group is attached in the local case.
        return cls(world_size, rank, None)
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """See the class docstring for the meaning of each argument."""
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer (or empty) tensors; validate eagerly.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-batch path: strides are provided per key (and per rank);
            # mutually exclusive with a scalar `stride`.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys share one total stride, so a scalar stride is still
                # well defined; otherwise _stride stays -1.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-batch path: derive the scalar stride from lengths/offsets
            # when not supplied (scripted variant used while jit tracing).
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate several KJTs key-wise into one KJT.

        All inputs must agree on weightedness and on variable-vs-fixed stride
        (and, for fixed stride, on the stride value itself).
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        # Either every KJT is variable-stride or none is; a mix is invalid.
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            # Cached length_per_key can only be carried over to the result if
            # every input has it; otherwise leave it to lazy recomputation.
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.
        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.
        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.
        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.
        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            # Each JT's lengths count is its implicit batch size (stride).
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # Uniform stride across keys -> scalar stride; otherwise fall back to
        # per-key strides (single-rank layout).
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        # Eagerly materialize the lazily computed per-key lengths/offsets.
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        # Drop the cached per-key lengths/offsets; recomputed lazily on demand.
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        # The KJT lives wherever its values tensor lives.
        return self._values.device
    def lengths(self) -> torch.Tensor:
        # Lazily derived from offsets when not given at construction; cached.
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        return self._lengths
    def offsets(self) -> torch.Tensor:
        # Lazily derived from lengths when not given at construction; cached.
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        return self._offsets
    def keys(self) -> List[str]:
        return self._keys
    def values(self) -> torch.Tensor:
        return self._values
    def weights(self) -> torch.Tensor:
        # Raises (via helper) when this KJT is unweighted.
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        return self._weights
    def stride(self) -> int:
        return self._stride
    def stride_per_key(self) -> List[int]:
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        # Raises (via helper) when inverse indices were not provided.
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        # key -> position map, computed once and cached.
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        # Number of values belonging to each key; computed lazily and cached.
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        # Start offset of each key into `values` (plus the final total);
        # computes and caches length_per_key as a by-product.
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        # Cumulative sum of per-key strides: start index of each key's rows in
        # the flattened lengths tensor; cached after first computation.
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT key-wise into one KJT per segment.

        ``segments`` are consecutive key counts; presumably they sum to
        ``len(self.keys())`` (not validated here -- TODO confirm caller
        contract).
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # Segment spans every key: reuse tensors and cached lazy
                # fields as-is.
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Empty segment: build a 0-element KJT on this KJT's device,
                # matching value/weight dtypes.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # Proper subset: slice values/weights by the precomputed value
                # offsets and lengths by per-key row offsets.
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Return a new KJT with keys reordered per ``indices``.

        Args:
            indices (List[int]): positions into the current key order.
            indices_tensor (Optional[torch.Tensor]): optional device tensor of
                the same indices; created on the fly when not supplied.
            include_inverse_indices (bool): whether to carry the stored inverse
                indices over to the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        # Gather per-key metadata in the permuted order.
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable batch: permute lengths and values segment-by-segment
            # since segment sizes differ per key.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed batch: a single fbgemm op permutes lengths, values and
            # optional weights together.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
def __getitem__(self, key: str) -> JaggedTensor:
    """Return the JaggedTensor view for a single key.

    The returned tensor slices the shared values/weights/lengths storage;
    no data is copied beyond the tensor views.
    """
    per_key_offsets = self.offset_per_key()
    key_idx = self._key_indices()[key]
    begin = per_key_offsets[key_idx]
    # A key past the end of offset_per_key yields an empty slice.
    if key_idx + 1 < len(per_key_offsets):
        end = per_key_offsets[key_idx + 1]
    else:
        end = begin
    lengths_offsets = self.lengths_offset_per_key()
    return JaggedTensor(
        values=self._values[begin:end],
        weights=(
            None
            if self.weights_or_none() is None
            else self.weights()[begin:end]
        ),
        lengths=self.lengths()[
            lengths_offsets[key_idx] : lengths_offsets[key_idx + 1]
        ],
        offsets=None,
    )
def to_dict(self) -> Dict[str, JaggedTensor]:
    """Convert this KJT into a ``{key: JaggedTensor}`` mapping, caching the result."""
    jt_dict = _maybe_compute_kjt_to_jt_dict(
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        keys=self.keys(),
        length_per_key=self.length_per_key(),
        values=self.values(),
        lengths=self.lengths(),
        variable_stride_per_key=self.variable_stride_per_key(),
        weights=self.weights_or_none(),
        jt_dict=self._jt_dict,
    )
    # Cache so repeated calls reuse the computed dict.
    self._jt_dict = jt_dict
    return jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Mark every backing tensor as in-use on ``stream``.

    See ``torch.Tensor.record_stream`` for the allocator semantics.
    """
    self._values.record_stream(stream)
    for maybe_tensor in (self._weights, self._lengths, self._offsets):
        if maybe_tensor is not None:
            maybe_tensor.record_stream(stream)
def to(
    self,
    device: torch.device,
    non_blocking: bool = False,
    dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
    """Return a copy of this KJT with all tensors moved to ``device``.

    ``dtype``, when provided, is applied to the weights only. Cached
    metadata (length/offset/index per key, jt_dict) is carried over.
    """
    if self.variable_stride_per_key():
        stride = None
        stride_per_key_per_rank = self._stride_per_key_per_rank
    else:
        stride = self._stride
        stride_per_key_per_rank = None

    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inv_keys, inv_tensor = inverse_indices
        inverse_indices = (inv_keys, inv_tensor.to(device, non_blocking=non_blocking))

    weights = self._weights
    if weights is not None:
        if dtype is None:
            weights = weights.to(device=device, non_blocking=non_blocking)
        else:
            weights = weights.to(dtype=dtype, device=device, non_blocking=non_blocking)

    lengths = self._lengths
    if lengths is not None:
        lengths = lengths.to(device, non_blocking=non_blocking)
    offsets = self._offsets
    if offsets is not None:
        offsets = offsets.to(device, non_blocking=non_blocking)

    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.to(device, non_blocking=non_blocking),
        weights=weights,
        lengths=lengths,
        offsets=offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        jt_dict=self._jt_dict,
        inverse_indices=inverse_indices,
    )
def __str__(self) -> str:
    """Human-readable multi-line rendering: one jagged tensor per key."""
    # Empty KJT: no keys, or neither offsets nor lengths were ever provided.
    if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
        return "KeyedJaggedTensor()\n"
    # Materialize offsets once; _jagged_tensor_string slices by them per key.
    offsets = self.offsets()
    return (
        "KeyedJaggedTensor({\n"
        + ",\n".join(
            [
                " "
                + _jagged_tensor_string(
                    self._keys[index],
                    self._values,
                    self._weights,
                    offsets,
                    # Window of batch rows belonging to this key
                    # (cumulative stride up to / including the key).
                    sum(self.stride_per_key()[:index]),
                    sum(self.stride_per_key()[: index + 1]),
                )
                for index in range(len(self._keys))
            ]
        )
        + "\n})\n"
    )
def pin_memory(self) -> "KeyedJaggedTensor":
    """Return a copy whose tensors live in pinned (page-locked) host memory."""
    if self.variable_stride_per_key():
        stride = None
        stride_per_key_per_rank = self._stride_per_key_per_rank
    else:
        stride = self._stride
        stride_per_key_per_rank = None

    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())

    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.pin_memory(),
        weights=None if weights is None else weights.pin_memory(),
        lengths=None if lengths is None else lengths.pin_memory(),
        offsets=None if offsets is None else offsets.pin_memory(),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        # The cached jt_dict is intentionally not carried over.
        jt_dict=None,
        inverse_indices=inverse_indices,
    )
def dist_labels(self) -> List[str]:
    """Names of the tensors produced by ``dist_tensors``, in matching order."""
    has_strides = self.variable_stride_per_key()
    has_weights = self.weights_or_none() is not None
    labels: List[str] = ["lengths", "values"]
    if has_strides:
        labels.append("strides")
    if has_weights:
        labels.append("weights")
    return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    """Per-split sizes for each tensor in ``dist_tensors``, given a key split."""
    strides_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
    lengths_per_split = _sum_by_splits(self.length_per_key(), key_splits)
    out = [strides_per_split, lengths_per_split]
    if self.variable_stride_per_key():
        out.append(key_splits)
    if self.weights_or_none() is not None:
        # Weights split exactly like values: one weight per value.
        out.append(lengths_per_split)
    return out
def dist_tensors(self) -> List[torch.Tensor]:
    """Tensors to exchange in a KJT all-to-all, ordered to match ``dist_labels``."""
    out: List[torch.Tensor] = [self.lengths(), self.values()]
    if self.variable_stride_per_key():
        # Per-key strides are staged via _pin_and_move onto the KJT's device.
        out.append(_pin_and_move(torch.tensor(self.stride_per_key()), self.device()))
    if self.weights_or_none() is not None:
        out.append(self.weights())
    return out
def dist_init(
    keys: List[str],
    tensors: List[torch.Tensor],
    variable_stride_per_key: bool,
    num_workers: int,
    recat: Optional[torch.Tensor],
    stride_per_rank: Optional[List[int]],
    stagger: int = 1,
) -> "KeyedJaggedTensor":
    """Build a KeyedJaggedTensor from the tensors received in a KJT all-to-all.

    ``tensors`` is the received list in ``dist_labels`` order:
    [lengths, values, (strides if variable stride), (weights if weighted)].
    ``recat``, when non-empty, permutes the received data from rank-major to
    key-major order. NOTE(review): takes no ``self``/``cls`` — presumably a
    @staticmethod whose decorator is not visible here; confirm.
    """
    # 2 = lengths+values; 3 = +strides or +weights; 4 = +strides+weights.
    assert len(tensors) in [2, 3, 4]
    lengths = tensors[0]
    values = tensors[1]
    stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
    # Weights, if present, are always the last tensor.
    weights = (
        tensors[-1]
        if (variable_stride_per_key and len(tensors) == 4)
        or (not variable_stride_per_key and len(tensors) == 3)
        else None
    )
    if variable_stride_per_key:
        assert stride_per_rank_per_key is not None
        # Received strides are rank-major; transpose to key-major lists.
        stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
            num_workers, len(keys)
        ).T.tolist()
        # Cumulative strides delimit each (rank, key) segment in `lengths`.
        strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
            stride_per_rank_per_key
        ).tolist()
        cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
        # Total number of values per (rank, key) segment, via cumsum differences.
        length_per_key = (
            cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
        )
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                # Permute lengths by batch segments, then values/weights by
                # value-count segments, into key-major order.
                lengths, _ = _permute_tensor_by_segments(
                    lengths,
                    stride_per_rank_per_key,
                    recat,
                    None,
                )
                values, weights = _permute_tensor_by_segments(
                    values,
                    length_per_key,
                    recat,
                    weights,
                )
        # Guard against an empty stride table (e.g. zero workers' worth of data).
        if not stride_per_key_per_rank:
            stride_per_key_per_rank = [[0]] * len(keys)
        if stagger > 1:
            # Reorder each key's per-rank strides to undo staggered rank layout.
            stride_per_key_per_rank_stagger: List[List[int]] = []
            local_world_size = num_workers // stagger
            for i in range(len(keys)):
                stride_per_rank_stagger: List[int] = []
                for j in range(local_world_size):
                    stride_per_rank_stagger.extend(
                        stride_per_key_per_rank[i][j::local_world_size]
                    )
                stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
            stride_per_key_per_rank = stride_per_key_per_rank_stagger
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
        # sync() eagerly computes derived state (length/offset per key).
        return kjt.sync()
    else:
        assert stride_per_rank is not None
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                stride = stride_per_rank[0]
                if all(s == stride for s in stride_per_rank):
                    # Uniform batch size per rank: 2-D permute over (segments, stride).
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_2D_sparse_data(
                        recat,
                        lengths.view(-1, stride),
                        values,
                        weights,
                        values.numel(),
                    )
                    lengths = lengths.view(-1)
                else:  # variable batch size per rank
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_1D_sparse_data(
                        recat,
                        lengths.view(-1),
                        values,
                        weights,
                        values.numel(),
                    )
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=sum(stride_per_rank),
        )
        return kjt.sync()
def create_mc_sharding(
    sharding_type: str,
    sharding_infos: List[EmbeddingShardingInfo],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
) -> EmbeddingSharding[
    SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
]:
    """Build the embedding sharding used with managed collisions.

    Only row-wise sharding is supported; any other type raises ValueError.
    """
    if sharding_type != ShardingType.ROW_WISE.value:
        raise ValueError(f"Sharding not supported {sharding_type}")
    return RwSequenceEmbeddingSharding(
        sharding_infos=sharding_infos,
        env=env,
        device=device,
    )
8,981 | from collections import defaultdict, deque
from dataclasses import dataclass
from typing import Any, cast, Dict, List, Optional, Tuple, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding import (
create_sharding_infos_by_sharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_sharding import EmbeddingSharding
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardingType,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sequence_sharding import (
InferCwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
InferRwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import InferRwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import InferSequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
InferTwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import ParameterSharding, ShardingEnv, ShardMetadata
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingConfig,
)
from torchrec.quant.embedding_modules import (
EmbeddingCollection as QuantEmbeddingCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
from torchrec.streamable import Multistreamable
# Let len() appear as an opaque leaf call in torch.fx graphs instead of being traced.
torch.fx.wrap("len")
try:
    # Load fbgemm sparse ops. OSError is deliberately swallowed so that builds
    # where these Buck targets do not exist can still import this module
    # (the ops are then expected to be registered by the fbgemm_gpu package).
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
class EmbeddingSharding(abc.ABC, Generic[C, F, T, W], FeatureShardingMixIn):
    """
    Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
    table_wise.
    """

    # The C/F/T/W type variables (context, features, lookup output, output-dist
    # output) are declared elsewhere in this module.

    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        # Optional registry of quantized-comm codecs, keyed by comm-op name.
        self._qcomm_codecs_registry = qcomm_codecs_registry

    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # NOTE(review): upstream this accessor is typically a @property and the
        # abstract methods below carry @abc.abstractmethod — decorators may have
        # been stripped here; confirm against the original module.
        return self._qcomm_codecs_registry

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[F]:
        """Create the module that distributes sparse input features across ranks."""
        pass

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[C, T, W]:
        """Create the module that redistributes lookup outputs back to ranks."""
        pass

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[F, T]:
        """Create the local embedding lookup module for this sharding."""
        pass

    def embedding_dims(self) -> List[int]:
        """Embedding dimension per (grouped) embedding table."""
        pass

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Shard metadata per embedding, None where a table is unsharded."""
        pass

    def embedding_names(self) -> List[str]:
        """Flat list of embedding (feature) names handled by this sharding."""
        pass

    def embedding_names_per_rank(self) -> List[List[str]]:
        """Embedding names grouped by the rank that owns them."""
        pass

    def embedding_tables(self) -> List[ShardedEmbeddingTable]:
        # Not part of the required interface for every sharding; optional override.
        raise NotImplementedError

    def uncombined_embedding_dims(self) -> List[int]:
        # Default: no column-wise recombination, so same as embedding_dims().
        return self.embedding_dims()

    def uncombined_embedding_names(self) -> List[str]:
        # Default: no column-wise recombination, so same as embedding_names().
        return self.embedding_names()
class KJTList(Multistreamable):
    """A list-like container of KeyedJaggedTensors (typically one per rank)
    that is torch.fx-traceable and CUDA-stream aware."""

    def __init__(self, features: List[KeyedJaggedTensor]) -> None:
        self.features = features

    def __len__(self) -> int:
        return len(self.features)

    def __setitem__(self, key: int, item: KeyedJaggedTensor) -> None:
        self.features[key] = item

    def __getitem__(self, key: int) -> KeyedJaggedTensor:
        return self.features[key]

    def __iter__(self) -> Iterator[KeyedJaggedTensor]:
        return iter(self.features)

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Propagate stream bookkeeping to every contained KJT.
        for kjt in self.features:
            kjt.record_stream(stream)

    def __fx_create_arg__(self, tracer: torch.fx.Tracer) -> fx.node.Argument:
        # Teach torch.fx how to embed a KJTList in a traced graph: emit a
        # call_function node that rebuilds the list from its traced elements.
        return tracer.create_node(
            "call_function",
            KJTList,
            args=(tracer.create_arg(self.features),),
            kwargs={},
        )
class InferCwSequenceEmbeddingSharding(
    BaseCwEmbeddingSharding[
        InferSequenceShardingContext, KJTList, List[torch.Tensor], List[torch.Tensor]
    ]
):
    """Column-wise sharding of sequence (unpooled) embeddings for inference."""

    def create_input_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseSparseFeaturesDist[KJTList]:
        # Input dist is shared with table-wise sharding: route features per rank.
        target_device = self._device if device is None else device
        return InferTwSparseFeaturesDist(
            features_per_rank=self.features_per_rank(),
            world_size=self._world_size,
            device=target_device,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
        target_device = self._device if device is None else device
        return InferGroupedEmbeddingsLookup(
            grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
            world_size=self._world_size,
            fused_params=fused_params,
            device=target_device,
        )

    def create_output_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseEmbeddingDist[
        InferSequenceShardingContext, List[torch.Tensor], List[torch.Tensor]
    ]:
        target_device = self._device if device is None else device
        assert target_device is not None
        return InferCwSequenceEmbeddingDist(
            target_device,
            self._world_size,
        )
class InferRwSequenceEmbeddingSharding(
    BaseRwEmbeddingSharding[
        InferSequenceShardingContext, KJTList, List[torch.Tensor], List[torch.Tensor]
    ]
):
    """
    Shards sequence (unpooled) row-wise, i.e.. a given embedding table is evenly
    distributed by rows and table slices are placed on all ranks for inference.
    """

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KJTList]:
        # Row-wise input dist buckets ids by hash-size range per rank.
        return InferRwSparseFeaturesDist(
            world_size=self._world_size,
            num_features=self._get_num_features(),
            feature_hash_sizes=self._get_feature_hash_sizes(),
            device=self._device if device is None else device,
            is_sequence=True,
            has_feature_processor=self._has_feature_processor,
            need_pos=False,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
        return InferGroupedEmbeddingsLookup(
            grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
            world_size=self._world_size,
            fused_params=fused_params,
            device=self._device if device is None else device,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[
        InferSequenceShardingContext, List[torch.Tensor], List[torch.Tensor]
    ]:
        return InferRwSequenceEmbeddingDist(
            self._device if device is None else device,
            self._world_size,
        )
class InferSequenceShardingContext(Multistreamable):
    """
    Stores inference context and reuses it in sequence embedding output_dist or result return.

    Attributes:
        features KJTList: stores the shards of KJT after input dist.
        features_before_input_dist KJT: stores the original input KJT (before input dist).
        unbucketize_permute_tensor Optional[torch.Tensor]: stores unbucketize tensor, only for RowWise sharding.
    """

    # NOTE(review): bare annotated class attributes with defaults — upstream this
    # is a @dataclass; the decorator appears to have been stripped here. Confirm.
    features: KJTList
    features_before_input_dist: Optional[KeyedJaggedTensor] = None
    unbucketize_permute_tensor: Optional[torch.Tensor] = None

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Propagate stream bookkeeping to every tensor held by this context.
        for feature in self.features:
            feature.record_stream(stream)
        if self.features_before_input_dist is not None:
            self.features_before_input_dist.record_stream(stream)
        if self.unbucketize_permute_tensor is not None:
            self.unbucketize_permute_tensor.record_stream(stream)
class InferTwSequenceEmbeddingSharding(
    BaseTwEmbeddingSharding[
        InferSequenceShardingContext,
        KJTList,
        List[torch.Tensor],
        List[torch.Tensor],
    ]
):
    """
    Shards sequence (unpooled) embedding table-wise, i.e.. a given embedding table is
    placed entirely on a selected rank, for inference.
    """

    def create_input_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseSparseFeaturesDist[KJTList]:
        # Note: the caller's device is passed through unmodified here
        # (no fallback to self._device, unlike create_output_dist).
        return InferTwSparseFeaturesDist(
            features_per_rank=self.features_per_rank(),
            world_size=self._world_size,
            device=device,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
        return InferGroupedEmbeddingsLookup(
            grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
            world_size=self._world_size,
            fused_params=fused_params,
            device=device,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[
        InferSequenceShardingContext, List[torch.Tensor], List[torch.Tensor]
    ]:
        target_device = self._device if device is None else device
        return InferTwSequenceEmbeddingDist(
            # pyre-fixme [6]
            target_device,
            self._world_size,
        )
class ShardingEnv:
    """
    Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
    enables `DistributedModelParallel` to be used during inference.
    """

    def __init__(
        self,
        world_size: int,
        rank: int,
        pg: Optional[dist.ProcessGroup] = None,
    ) -> None:
        # pg is None for local (inference-style) environments; see from_local.
        self.world_size = world_size
        self.rank = rank
        self.process_group: Optional[dist.ProcessGroup] = pg

    def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
        """
        Creates ProcessGroup-based sharding environment.

        NOTE:
            Typically used during training.
        """
        # NOTE(review): signature takes `cls` — upstream this is a @classmethod;
        # the decorator appears to have been stripped here. Confirm before use.
        return cls(dist.get_world_size(pg), dist.get_rank(pg), pg)

    def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
        """
        Creates a local host-based sharding environment.

        NOTE:
            Typically used during single host inference.
        """
        # NOTE(review): same @classmethod concern as from_process_group.
        return cls(world_size, rank, None)
def create_infer_embedding_sharding(
    sharding_type: str,
    sharding_infos: List[EmbeddingShardingInfo],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
) -> EmbeddingSharding[
    InferSequenceShardingContext,
    KJTList,
    List[torch.Tensor],
    List[torch.Tensor],
]:
    """Factory mapping a sharding-type string to its inference sequence sharding.

    Raises ValueError for sharding types without an inference implementation.
    """
    if sharding_type == ShardingType.TABLE_WISE.value:
        sharding_cls = InferTwSequenceEmbeddingSharding
    elif sharding_type == ShardingType.COLUMN_WISE.value:
        sharding_cls = InferCwSequenceEmbeddingSharding
    elif sharding_type == ShardingType.ROW_WISE.value:
        sharding_cls = InferRwSequenceEmbeddingSharding
    else:
        raise ValueError(f"Sharding type not supported {sharding_type}")
    return sharding_cls(sharding_infos, env, device)
8,982 | from collections import defaultdict, deque
from dataclasses import dataclass
from typing import Any, cast, Dict, List, Optional, Tuple, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding import (
create_sharding_infos_by_sharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_sharding import EmbeddingSharding
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardingType,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sequence_sharding import (
InferCwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
InferRwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import InferRwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import InferSequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
InferTwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import ParameterSharding, ShardingEnv, ShardMetadata
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingConfig,
)
from torchrec.quant.embedding_modules import (
EmbeddingCollection as QuantEmbeddingCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
from torchrec.streamable import Multistreamable
# Let len() appear as an opaque leaf call in torch.fx graphs instead of being traced.
torch.fx.wrap("len")
try:
    # Load fbgemm sparse ops. OSError is deliberately swallowed so that builds
    # where these Buck targets do not exist can still import this module.
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
def _fx_unwrap_optional_tensor(optional: Optional[torch.Tensor]) -> torch.Tensor:
assert optional is not None, "Expected optional to be non-None Tensor"
return optional | null |
8,983 | from collections import defaultdict, deque
from dataclasses import dataclass
from typing import Any, cast, Dict, List, Optional, Tuple, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding import (
create_sharding_infos_by_sharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_sharding import EmbeddingSharding
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardingType,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sequence_sharding import (
InferCwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
InferRwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import InferRwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import InferSequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
InferTwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import ParameterSharding, ShardingEnv, ShardMetadata
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingConfig,
)
from torchrec.quant.embedding_modules import (
EmbeddingCollection as QuantEmbeddingCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
from torchrec.streamable import Multistreamable
# Let len() appear as an opaque leaf call in torch.fx graphs instead of being traced.
torch.fx.wrap("len")
try:
    # Load fbgemm sparse ops. OSError is deliberately swallowed so that builds
    # where these Buck targets do not exist can still import this module.
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
def _construct_jagged_tensors(
    sharding_type: str,
    embeddings: List[torch.Tensor],
    features: KJTList,
    embedding_names_per_rank: List[List[str]],
    features_before_input_dist: KeyedJaggedTensor,
    need_indices: bool,
    rw_unbucketize_tensor: Optional[torch.Tensor],
    cw_features_to_permute_indices: Dict[str, torch.Tensor],
    key_to_feature_permuted_coordinates: Dict[str, torch.Tensor],
) -> Dict[str, JaggedTensor]:
    """Reassemble per-key JaggedTensors from sharded embedding lookup results,
    dispatching on the sharding type. Row-wise additionally requires the
    unbucketize permutation produced by the input dist."""
    rw = ShardingType.ROW_WISE.value
    cw = ShardingType.COLUMN_WISE.value
    tw = ShardingType.TABLE_WISE.value
    if sharding_type not in (rw, cw, tw):
        raise ValueError(f"Unknown sharding type {sharding_type}")
    if sharding_type == rw:
        if rw_unbucketize_tensor is None:
            raise ValueError("rw_unbucketize_tensor is required for row-wise sharding")
        return _construct_jagged_tensors_rw(
            embeddings,
            features_before_input_dist,
            need_indices,
            rw_unbucketize_tensor,
        )
    if sharding_type == cw:
        return _construct_jagged_tensors_cw(
            embeddings,
            features,
            embedding_names_per_rank,
            need_indices,
            cw_features_to_permute_indices,
            key_to_feature_permuted_coordinates,
        )
    return _construct_jagged_tensors_tw(embeddings, features, need_indices)
class KJTList(Multistreamable):
    """List-like container of KeyedJaggedTensors that is torch.fx-traceable
    and CUDA-stream aware."""

    def __init__(self, features: List[KeyedJaggedTensor]) -> None:
        self.features = features

    def __len__(self) -> int:
        return len(self.features)

    def __setitem__(self, key: int, item: KeyedJaggedTensor) -> None:
        self.features[key] = item

    def __getitem__(self, key: int) -> KeyedJaggedTensor:
        return self.features[key]

    def __iter__(self) -> Iterator[KeyedJaggedTensor]:
        return iter(self.features)

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Propagate stream bookkeeping to every contained KJT.
        for feature in self.features:
            feature.record_stream(stream)

    def __fx_create_arg__(self, tracer: torch.fx.Tracer) -> fx.node.Argument:
        # Teach torch.fx how to embed a KJTList in a traced graph.
        return tracer.create_node(
            "call_function",
            KJTList,
            args=(tracer.create_arg(self.features),),
            kwargs={},
        )
# pyre-ignore
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
    self,
    values: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    lengths: Optional[torch.Tensor] = None,
    offsets: Optional[torch.Tensor] = None,
) -> None:
    """Store the jagged representation.

    At least one of ``lengths``/``offsets`` must be provided; whichever is
    missing is derived lazily by lengths()/offsets(). Both are validated to
    be integral (or empty) tensors.
    """
    self._values: torch.Tensor = values
    self._weights: Optional[torch.Tensor] = weights
    _assert_offsets_or_lengths_is_provided(offsets, lengths)
    if offsets is not None:
        _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
    if lengths is not None:
        _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
    self._lengths: Optional[torch.Tensor] = lengths
    self._offsets: Optional[torch.Tensor] = offsets
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
    """Construct a JaggedTensor with no values and no rows.

    All backing tensors are zero-element tensors of the requested dtypes.
    """
    empty_weights = None
    if is_weighted:
        empty_weights = torch.empty(0, dtype=weights_dtype, device=device)
    return JaggedTensor(
        values=torch.empty(0, dtype=values_dtype, device=device),
        offsets=torch.empty(0, dtype=lengths_dtype, device=device),
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        weights=empty_weights,
    )
def from_dense_lengths(
    values: torch.Tensor,
    lengths: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
    """
    Constructs `JaggedTensor` from dense values/weights of shape (B, N,).

    Note that `lengths` is still of shape (B,).
    """
    # Boolean mask selecting, per row b, the first lengths[b] columns.
    positions = _arange(end=values.size(1), device=values.device)
    mask2d = positions.expand(values.size(0), -1) < lengths.unsqueeze(-1)
    return JaggedTensor(
        values=values[mask2d],
        weights=_optional_mask(weights, mask2d),
        lengths=lengths,
    )
def from_dense(
    values: List[torch.Tensor],
    weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
    """
    Constructs `JaggedTensor` from a list of per-row tensors.

    Args:
        values (List[torch.Tensor]): one 1-D tensor per jagged row.
        weights (Optional[List[torch.Tensor]]): optional weights, one tensor
            per row with the same shapes as ``values``.

    Returns:
        JaggedTensor: jagged tensor whose flat values are the concatenation
        of the rows and whose lengths are the per-row sizes.

    Example::

        values = [
            torch.Tensor([1.0]),
            torch.Tensor(),
            torch.Tensor([7.0, 8.0]),
            torch.Tensor([10.0, 11.0, 12.0]),
        ]
        j = JaggedTensor.from_dense(values=values)
        # rows of j: [1.0], [], [7.0, 8.0], [10.0, 11.0, 12.0]
    """
    flat_values = torch.cat(values, dim=0)
    lengths = torch.tensor(
        [row.size(0) for row in values],
        dtype=torch.int32,
        device=flat_values.device,
    )
    flat_weights = None
    if weights is not None:
        flat_weights = torch.cat(weights, dim=0)
    return JaggedTensor(
        values=flat_values,
        weights=flat_weights,
        lengths=lengths,
    )
def to_dense(self) -> List[torch.Tensor]:
    """
    Split the flat values back into one tensor per jagged row.

    Returns:
        List[torch.Tensor]: one (possibly empty) tensor per row, each a view
        into the underlying values storage.

    Example::

        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jt = JaggedTensor(values=values, offsets=offsets)
        rows = jt.to_dense()
        # rows: [1,2], [], [3], [4], [5], [6,7,8]
    """
    offsets = self.offsets()
    values = self.values()
    dense_rows: List[torch.Tensor] = []
    for row in range(offsets.size(0) - 1):
        begin = offsets[row].item()
        end = offsets[row + 1].item()
        dense_rows.append(values[begin:end])
    return dense_rows
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
    """
    Split the flat weights into one tensor per jagged row.

    Returns:
        Optional[List[torch.Tensor]]: per-row weight tensors, or ``None``
        when this JaggedTensor carries no weights.
    """
    if self.weights_or_none() is None:
        return None
    offsets = self.offsets()
    weights = self.weights()
    dense_rows: List[torch.Tensor] = []
    for row in range(offsets.size(0) - 1):
        begin = offsets[row].item()
        end = offsets[row + 1].item()
        dense_rows.append(weights[begin:end])
    return dense_rows
def to_padded_dense(
    self,
    desired_length: Optional[int] = None,
    padding_value: float = 0.0,
) -> torch.Tensor:
    """
    Densify the values into a (B, N) tensor.

    B is the number of rows; N is ``desired_length`` when given, otherwise
    the longest row. Rows shorter than N are padded with ``padding_value``;
    rows longer than N are truncated.

    Args:
        desired_length (int): target row width N.
        padding_value (float): fill value for the padded positions.

    Returns:
        torch.Tensor: 2-D dense tensor of shape (B, N).
    """
    if desired_length is None:
        target_len = int(torch.max(self.lengths()).item())
    else:
        target_len = desired_length
    return torch.ops.fbgemm.jagged_to_padded_dense(
        self.values(), [self.offsets()], [target_len], padding_value
    )
def to_padded_dense_weights(
    self,
    desired_length: Optional[int] = None,
    padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
    """
    Constructs a 2D dense tensor of shape (B, N) from the JT's weights, where
    B == len(self.lengths()) and N is `desired_length` when given, otherwise
    the longest row length. Rows shorter than N are padded with
    `padding_value`; rows longer than N keep only their first N weights.

    Args:
        desired_length (int): target row length N.
        padding_value (float): fill value for padded positions.

    Returns:
        Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.

    Example::

        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
        d_wt = jt.to_padded_dense_weights(
            desired_length=2,
            padding_value=1.0,
        )
        # d_wt = [
        #     [0.1, 0.2],
        #     [1.0, 1.0],
        #     [0.3, 1.0],
        #     [0.4, 1.0],
        #     [0.5, 1.0],
        #     [0.6, 0.7],
        # ]
    """
    if self.weights_or_none() is None:
        return None
    if desired_length is not None:
        target_length = desired_length
    else:
        # Default to the longest row so no weights get truncated.
        target_length = int(torch.max(self.lengths()).item())
    return torch.ops.fbgemm.jagged_to_padded_dense(
        self.weights(), [self.offsets()], [target_length], padding_value
    )
def lengths(self) -> torch.Tensor:
    """Return lengths, deriving them from offsets on first use and caching."""
    derived = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = derived
    return derived
def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached lengths tensor without deriving it from offsets."""
    return self._lengths
def offsets(self) -> torch.Tensor:
    """Return offsets, deriving them from lengths on first use and caching."""
    derived = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = derived
    return derived
def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached offsets tensor without deriving it from lengths."""
    return self._offsets
def values(self) -> torch.Tensor:
    """Return the flat values tensor backing this jagged tensor."""
    return self._values
def weights(self) -> torch.Tensor:
    """Return the weights tensor; raises if this tensor is unweighted."""
    return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
    """Return the weights tensor, or `None` when unweighted."""
    return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
    """Return a new JaggedTensor with every constituent tensor moved to `device`.

    `None` components stay `None`; caches (lengths/offsets) are moved, not
    recomputed. Local Optional refinement is kept explicit for TorchScript.
    """
    moved_weights = self._weights
    if moved_weights is not None:
        moved_weights = moved_weights.to(device, non_blocking=non_blocking)
    moved_lengths = self._lengths
    if moved_lengths is not None:
        moved_lengths = moved_lengths.to(device, non_blocking=non_blocking)
    moved_offsets = self._offsets
    if moved_offsets is not None:
        moved_offsets = moved_offsets.to(device, non_blocking=non_blocking)
    return JaggedTensor(
        values=self._values.to(device, non_blocking=non_blocking),
        weights=moved_weights,
        lengths=moved_lengths,
        offsets=moved_offsets,
    )
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Mark every constituent tensor as in-use on `stream` (Tensor.record_stream)."""
    self._values.record_stream(stream)
    wts = self._weights
    if wts is not None:
        wts.record_stream(stream)
    lens = self._lengths
    if lens is not None:
        lens.record_stream(stream)
    offs = self._offsets
    if offs is not None:
        offs.record_stream(stream)
def __str__(self) -> str:
    """Render the jagged values (and weights, if any) row by row."""
    offsets = self.offsets()
    vals_repr = _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
    if self._weights is None:
        return "JaggedTensor({\n " + vals_repr + "\n})\n"
    weights_repr = _jagged_values_string(
        _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
    )
    return (
        "JaggedTensor({\n"
        + ' "values": '
        + vals_repr
        + ',\n "weights": '
        + weights_repr
        + "\n})\n"
    )
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """Represents an (optionally weighted) keyed jagged tensor.

    A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
    slices may be of different lengths. Keyed on first dimension and jagged on the last
    dimension.

    Implementation is torch.jit.script-able.

    Args:
        keys (List[str]): keys to the jagged Tensor.
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
            same shape as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
        stride (Optional[int]): number of examples per batch.
        stride_per_key_per_rank (Optional[List[List[int]]]): batch size
            (number of examples) per key per rank, with the outer list representing the
            keys and the inner list representing the values.
            Each value in the inner list represents the number of examples in the batch
            from the rank of its index in a distributed context.
        length_per_key (Optional[List[int]]): start length for each key.
        offset_per_key (Optional[List[int]]): start offset for each key and final
            offset.
        index_per_key (Optional[Dict[str, int]]): index for each key.
        jt_dict (Optional[Dict[str, JaggedTensor]]):
        inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
            expand deduplicated embedding output for variable stride per key.

    Example::

        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0

        dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
        dim_1: optional second dimension (ie. batch size)
        dim_2: The jagged dimension which has slice lengths between 0-3 in the above example

        # We represent this data with following inputs:

        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    """

    # This is the subset of fields on KJT which are required (all other fields
    # can be derived from these fields, and are only lazily computed caches —
    # see the `*_or_none` accessors and `sync()`/`unsync()`).
    _fields = [
        "_values",
        "_weights",
        "_lengths",
        "_offsets",
    ]
def __init__(
    self,
    keys: List[str],
    values: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    lengths: Optional[torch.Tensor] = None,
    offsets: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    # Below exposed to ensure torch.script-able
    length_per_key: Optional[List[int]] = None,
    offset_per_key: Optional[List[int]] = None,
    index_per_key: Optional[Dict[str, int]] = None,
    jt_dict: Optional[Dict[str, JaggedTensor]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
    """Initialize a KJT; see the class docstring for argument semantics."""
    self._keys: List[str] = keys
    self._values: torch.Tensor = values
    self._weights: Optional[torch.Tensor] = weights
    # lengths/offsets index into values, so they must be integral (or empty).
    if offsets is not None:
        _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
    if lengths is not None:
        _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
    self._lengths: Optional[torch.Tensor] = lengths
    self._offsets: Optional[torch.Tensor] = offsets

    self._stride_per_key_per_rank: List[List[int]] = []
    self._stride_per_key: List[int] = []
    self._variable_stride_per_key: bool = False
    self._stride: int = -1

    if stride_per_key_per_rank is not None:
        # Variable-stride mode: `stride` and `stride_per_key_per_rank` are
        # mutually exclusive ways of describing the batch layout.
        if stride is not None:
            raise ValueError(
                "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
            )

        self._stride_per_key_per_rank = stride_per_key_per_rank
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        self._variable_stride_per_key = True
        if not stride_per_key_per_rank:
            self._stride = 0
        elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
            # All keys share one effective stride; expose it as the scalar stride.
            self._stride = self.stride_per_key()[0]
        # NOTE: unequal per-key strides leave `_stride` at the sentinel -1.
    else:
        if torch.jit.is_tracing():
            stride = _maybe_compute_stride_kjt_scripted(
                keys, stride, lengths, offsets
            )[0]
        else:
            stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
        self._stride = stride
        # NOTE(review): list-multiplication shares ONE inner `[stride]` list
        # across all keys; safe only while the inner lists are never mutated
        # per-key — confirm before adding in-place updates.
        self._stride_per_key_per_rank = [[stride]] * len(self._keys)
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]

    # lazy fields (caches derived from the required fields above)
    self._length_per_key: Optional[List[int]] = length_per_key
    self._offset_per_key: Optional[List[int]] = offset_per_key
    self._index_per_key: Optional[Dict[str, int]] = index_per_key
    self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
    self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
        inverse_indices
    )
    self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
    keys: List[str],
    values: torch.Tensor,
    offsets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KJT from offsets and eagerly compute its per-key metadata."""
    constructed = KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        offsets=offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    )
    # sync() populates length_per_key/offset_per_key up front.
    return constructed.sync()
def from_lengths_sync(
    keys: List[str],
    values: torch.Tensor,
    lengths: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KJT from lengths and eagerly compute its per-key metadata."""
    constructed = KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        lengths=lengths,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    )
    # sync() populates length_per_key/offset_per_key up front.
    return constructed.sync()
def concat(
    kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
    """Concatenate KJTs along the key dimension into one KJT.

    All inputs must agree on weightedness and on variable-vs-fixed stride;
    fixed-stride inputs must all share the same stride.

    Raises:
        ValueError: on an empty list or mixed weighted/unweighted inputs.
    """
    if len(kjt_list) == 0:
        raise ValueError("Can't concat empty KJT list")

    is_weighted: bool = kjt_list[0].weights_or_none() is not None
    has_length_per_key: bool = True

    length_per_key: List[int] = []
    keys: List[str] = []
    value_list: List[torch.Tensor] = []
    weight_list: List[torch.Tensor] = []
    length_list: List[torch.Tensor] = []
    stride_per_key_per_rank: List[List[int]] = []
    stride: Optional[int] = None
    variable_stride_per_key_list = [
        kjt.variable_stride_per_key() for kjt in kjt_list
    ]
    # Either every input uses variable stride or none does.
    assert all(variable_stride_per_key_list) or not any(
        variable_stride_per_key_list
    ), "variable stride per key must be consistent for all KJTs"
    variable_stride_per_key = all(variable_stride_per_key_list)

    for kjt in kjt_list:
        curr_is_weighted: bool = kjt.weights_or_none() is not None
        if is_weighted != curr_is_weighted:
            raise ValueError("Can't merge weighted KJT with unweighted KJT")
        # length_per_key can only be propagated if EVERY input has it cached;
        # one missing cache disables it for the concatenated result.
        _length_per_key: Optional[List[int]] = None
        if kjt._length_per_key is None:
            has_length_per_key = False
        else:
            _length_per_key = kjt._length_per_key
        if has_length_per_key and _length_per_key is not None:
            length_per_key += _length_per_key
        keys += kjt.keys()
        value_list.append(kjt.values())
        if is_weighted:
            weight_list.append(kjt.weights())
        length_list.append(kjt.lengths())
        if variable_stride_per_key:
            stride_per_key_per_rank += kjt.stride_per_key_per_rank()
        elif stride is None:
            stride = kjt.stride()
        else:
            assert stride == kjt.stride(), "strides must be consistent for all KJTs"

    return KeyedJaggedTensor(
        keys=keys,
        values=torch.cat(value_list, dim=0),
        weights=torch.cat(weight_list, dim=0) if is_weighted else None,
        lengths=torch.cat(length_list, dim=0),
        stride=stride,
        stride_per_key_per_rank=(
            stride_per_key_per_rank if variable_stride_per_key else None
        ),
        length_per_key=length_per_key if has_length_per_key else None,
    )
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
    """Build a KJT with no keys, empty values/lengths, and stride 0."""
    return KeyedJaggedTensor(
        keys=torch.jit.annotate(List[str], []),
        values=torch.empty(0, dtype=values_dtype, device=device),
        weights=(
            torch.empty(0, dtype=weights_dtype, device=device)
            if is_weighted
            else None
        ),
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        stride=0,
    )
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """Build an empty KJT mirroring `kjt`'s dtypes, device, and stride setup."""
    # Explicit Optional annotations keep TorchScript type inference happy.
    stride: Optional[int] = None
    stride_per_key_per_rank: Optional[List[List[int]]] = None
    if kjt.variable_stride_per_key():
        stride_per_key_per_rank = kjt.stride_per_key_per_rank()
    else:
        stride = kjt.stride()
    empty_weights: Optional[torch.Tensor] = None
    if kjt.weights_or_none() is not None:
        empty_weights = torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
        weights=empty_weights,
        lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.

    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.

    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.

    Consider the visualization of the following KeyedJaggedTensor:
    #              0       1        2  <-- dim_1
    # "Feature0"   [V0,V1] None    [V2]
    # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    #   ^
    #  dim_0

    Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

    Now if the input jt_dict = {
        # "Feature0"   [V0,V1] [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.

    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    stride_per_key: List[int] = []
    for jt in jt_dict.values():
        # Each JT's implicit batch size is the number of rows in its lengths.
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # Uniform per-key batch size -> fixed-stride KJT; otherwise fall back to a
    # single-rank variable-stride layout.
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
def sync(self) -> "KeyedJaggedTensor":
    """Eagerly compute (and cache) per-key lengths/offsets; returns self."""
    self.length_per_key()
    self.offset_per_key()
    return self
def unsync(self) -> "KeyedJaggedTensor":
    """Drop the cached per-key lengths/offsets; returns self."""
    self._length_per_key = None
    self._offset_per_key = None
    return self
def device(self) -> torch.device:
    """Device on which the values tensor lives."""
    return self._values.device
def lengths(self) -> torch.Tensor:
    """Return lengths, deriving them from offsets on first use and caching."""
    derived = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = derived
    return derived
def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached lengths tensor without deriving it from offsets."""
    return self._lengths
def offsets(self) -> torch.Tensor:
    """Return offsets, deriving them from lengths on first use and caching."""
    derived = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = derived
    return derived
def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached offsets tensor without deriving it from lengths."""
    return self._offsets
def keys(self) -> List[str]:
    """Return the list of feature keys, in storage order."""
    return self._keys
def values(self) -> torch.Tensor:
    """Return the flat values tensor backing this KJT."""
    return self._values
def weights(self) -> torch.Tensor:
    """Return the weights tensor; raises if this KJT is unweighted."""
    return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
    """Return the weights tensor, or `None` when unweighted."""
    return self._weights
def stride(self) -> int:
    """Batch size per key (-1 sentinel when per-key strides differ)."""
    return self._stride
def stride_per_key(self) -> List[int]:
    """Total batch size for each key (summed across ranks)."""
    return self._stride_per_key
def stride_per_key_per_rank(self) -> List[List[int]]:
    """Batch size per key per rank (outer list: keys; inner list: ranks)."""
    return self._stride_per_key_per_rank
def variable_stride_per_key(self) -> bool:
    """True when this KJT was built with per-key-per-rank strides."""
    return self._variable_stride_per_key
def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    """Return inverse indices; raises if none were provided."""
    return _get_inverse_indices_or_throw(self._inverse_indices)
def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    """Return the inverse indices, or `None` when not set."""
    return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
    """Return (and cache) the mapping from key name to its position."""
    mapping: Dict[str, int] = _maybe_compute_index_per_key(
        self._keys,
        self._index_per_key,
    )
    self._index_per_key = mapping
    return mapping
def length_per_key(self) -> List[int]:
    """Return (and cache) the number of values belonging to each key."""
    computed = _maybe_compute_length_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = computed
    return computed
def length_per_key_or_none(self) -> Optional[List[int]]:
    """Return the cached per-key lengths without computing them."""
    return self._length_per_key
def offset_per_key(self) -> List[int]:
    """Return (and cache) each key's start offset plus the final offset."""
    computed_lengths, computed_offsets = _maybe_compute_offset_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    # The helper derives both; cache both.
    self._length_per_key = computed_lengths
    self._offset_per_key = computed_offsets
    return computed_offsets
def offset_per_key_or_none(self) -> Optional[List[int]]:
    """Return the cached per-key offsets without computing them."""
    return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
    """Return (and cache) cumulative per-key offsets into the lengths tensor."""
    cached = self._lengths_offset_per_key
    if not cached:
        cached = _cumsum(self.stride_per_key())
        self._lengths_offset_per_key = cached
    return cached
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    """Split this KJT into consecutive KJTs of ``segments[i]`` keys each.

    The pieces' values/weights/lengths are views into this KJT's storage.
    """
    split_list: List[KeyedJaggedTensor] = []
    start = 0
    start_offset = 0
    _length_per_key = self.length_per_key()
    _offset_per_key = self.offset_per_key()
    for segment in segments:
        end = start + segment
        end_offset = _offset_per_key[end]
        keys: List[str] = self._keys[start:end]
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank()[start:end])
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        if segment == len(self._keys):
            # Fast path: the segment covers every key — reuse the underlying
            # tensors and all cached metadata without slicing.
            # no torch slicing required
            split_list.append(
                KeyedJaggedTensor(
                    keys=self._keys,
                    values=self._values,
                    weights=self.weights_or_none(),
                    lengths=self._lengths,
                    offsets=self._offsets,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=self._length_per_key,
                    offset_per_key=self._offset_per_key,
                    index_per_key=self._index_per_key,
                    jt_dict=self._jt_dict,
                    inverse_indices=None,
                )
            )
        elif segment == 0:
            # Zero-key segment: emit an empty KJT on the same device/dtypes.
            empty_int_list: List[int] = torch.jit.annotate(List[int], [])
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=torch.tensor(
                        empty_int_list,
                        device=self.device(),
                        dtype=self._values.dtype,
                    ),
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self.weights().dtype,
                        )
                    ),
                    lengths=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    offsets=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=None,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        else:
            # General case: slice values/weights by value-offset range and
            # lengths by the per-key lengths offsets.
            split_length_per_key = _length_per_key[start:end]

            if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                # Checks for dynamo dynamic shapes tracing
                torch._check_is_size(start_offset)
                torch._check_is_size(end_offset)
                torch._check_is_size(end_offset - start_offset)
                torch._check(start_offset <= self._values.size(0))
                torch._check(end_offset <= self._values.size(0))
                torch._check(end_offset >= start_offset)

            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=self._values[start_offset:end_offset],
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else self.weights()[start_offset:end_offset]
                    ),
                    lengths=self.lengths()[
                        self.lengths_offset_per_key()[
                            start
                        ] : self.lengths_offset_per_key()[end]
                    ],
                    offsets=None,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=split_length_per_key,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        start = end
        start_offset = end_offset
    return split_list
def permute(
    self,
    indices: List[int],
    indices_tensor: Optional[torch.Tensor] = None,
    include_inverse_indices: bool = False,
) -> "KeyedJaggedTensor":
    """Return a new KJT with keys (and their data) reordered by ``indices``.

    Args:
        indices: new key order, as positions into the current key list.
        indices_tensor: optional device tensor of ``indices`` to avoid a
            host->device copy when the caller already has one.
        include_inverse_indices: carry over this KJT's inverse indices.
    """
    if indices_tensor is None:
        indices_tensor = torch.tensor(
            indices, dtype=torch.int, device=self.device()
        )
    length_per_key = self.length_per_key()
    permuted_keys: List[str] = []
    permuted_stride_per_key_per_rank: List[List[int]] = []
    permuted_length_per_key: List[int] = []
    permuted_lengths_sum = 0
    for index in indices:
        key = self.keys()[index]
        permuted_keys.append(key)
        permuted_stride_per_key_per_rank.append(
            self.stride_per_key_per_rank()[index]
        )
        permuted_length_per_key.append(length_per_key[index])
        permuted_lengths_sum += length_per_key[index]
    if self.variable_stride_per_key():
        # Variable-stride path: permute lengths by per-key stride segments and
        # values/weights by per-key length segments.
        length_per_key_tensor = _pin_and_move(
            torch.tensor(self.length_per_key()), self.device()
        )
        stride_per_key_tensor = _pin_and_move(
            torch.tensor(self.stride_per_key()), self.device()
        )
        permuted_lengths, _ = _permute_tensor_by_segments(
            self.lengths(),
            stride_per_key_tensor,
            indices_tensor,
            None,
        )
        permuted_values, permuted_weights = _permute_tensor_by_segments(
            self.values(),
            length_per_key_tensor,
            indices_tensor,
            self.weights_or_none(),
        )
    else:
        # Fixed-stride path: lengths form a (num_keys, stride) grid, so the
        # fused fbgemm 2D permute handles lengths/values/weights together.
        (
            permuted_lengths,
            permuted_values,
            permuted_weights,
        ) = torch.ops.fbgemm.permute_2D_sparse_data(
            indices_tensor,
            self.lengths().view(len(self._keys), -1),
            self.values(),
            self.weights_or_none(),
            permuted_lengths_sum,
        )
    stride, optional_permuted_stride_per_key_per_rank = (
        (None, permuted_stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    kjt = KeyedJaggedTensor(
        keys=permuted_keys,
        values=permuted_values,
        weights=permuted_weights,
        lengths=permuted_lengths.view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
        length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=(
            self.inverse_indices_or_none() if include_inverse_indices else None
        ),
    )
    return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
    """Return a KJT sharing this one's tensors but with 1-D (flattened) lengths."""
    stride, stride_per_key_per_rank = (
        (None, self.stride_per_key_per_rank())
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    # Offsets and most per-key caches are dropped; length_per_key is kept.
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values,
        weights=self._weights,
        lengths=self.lengths().view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self.length_per_key(),
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=None,
    )
def __getitem__(self, key: str) -> JaggedTensor:
    """Return the JaggedTensor for `key`, built from views into this KJT."""
    index = self._key_indices()[key]
    offset_per_key = self.offset_per_key()
    start_offset = offset_per_key[index]
    if index + 1 < len(offset_per_key):
        end_offset = offset_per_key[index + 1]
    else:
        # Degenerate case: no next offset recorded — yield an empty slice.
        end_offset = start_offset
    lengths_start = self.lengths_offset_per_key()[index]
    lengths_end = self.lengths_offset_per_key()[index + 1]
    sliced_weights: Optional[torch.Tensor] = None
    if self.weights_or_none() is not None:
        sliced_weights = self.weights()[start_offset:end_offset]
    return JaggedTensor(
        values=self._values[start_offset:end_offset],
        weights=sliced_weights,
        lengths=self.lengths()[lengths_start:lengths_end],
        offsets=None,
    )
def to_dict(self) -> Dict[str, JaggedTensor]:
    """Return (and cache) a per-key dict of JaggedTensors for this KJT."""
    computed = _maybe_compute_kjt_to_jt_dict(
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        keys=self.keys(),
        length_per_key=self.length_per_key(),
        lengths=self.lengths(),
        values=self.values(),
        variable_stride_per_key=self.variable_stride_per_key(),
        weights=self.weights_or_none(),
        jt_dict=self._jt_dict,
    )
    self._jt_dict = computed
    return computed
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Mark every constituent tensor as in-use on `stream` (Tensor.record_stream)."""
    self._values.record_stream(stream)
    wts = self._weights
    if wts is not None:
        wts.record_stream(stream)
    lens = self._lengths
    if lens is not None:
        lens.record_stream(stream)
    offs = self._offsets
    if offs is not None:
        offs.record_stream(stream)
def to(
    self,
    device: torch.device,
    non_blocking: bool = False,
    dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
    """Return a copy of this KJT with all tensors moved to `device`.

    Cached per-key metadata is carried over unchanged. NOTE: `dtype` is
    applied to the weights tensor only; values/lengths/offsets keep their
    dtypes.
    """
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    length_per_key = self._length_per_key
    offset_per_key = self._offset_per_key
    index_per_key = self._index_per_key
    jt_dict = self._jt_dict
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        # Only the tensor half of (keys, tensor) needs moving.
        inverse_indices = (
            inverse_indices[0],
            inverse_indices[1].to(device, non_blocking=non_blocking),
        )
    if weights is not None:
        if dtype is not None:
            weights = weights.to(
                dtype=dtype, device=device, non_blocking=non_blocking
            )
        else:
            weights = weights.to(device=device, non_blocking=non_blocking)

    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.to(device, non_blocking=non_blocking),
        weights=weights,
        lengths=(
            lengths.to(device, non_blocking=non_blocking)
            if lengths is not None
            else None
        ),
        offsets=(
            offsets.to(device, non_blocking=non_blocking)
            if offsets is not None
            else None
        ),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=length_per_key,
        offset_per_key=offset_per_key,
        index_per_key=index_per_key,
        jt_dict=jt_dict,
        inverse_indices=inverse_indices,
    )
def __str__(self) -> str:
    """Render each key's jagged slice (and weights, if any) row by row."""
    # Precedence note: `and` binds tighter than `or`, so this is empty when
    # there are no keys OR when (offsets is None AND lengths is None).
    if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
        return "KeyedJaggedTensor()\n"
    offsets = self.offsets()

    return (
        "KeyedJaggedTensor({\n"
        + ",\n".join(
            [
                "    "
                + _jagged_tensor_string(
                    self._keys[index],
                    self._values,
                    self._weights,
                    offsets,
                    # Each key spans stride_per_key[index] rows of offsets.
                    sum(self.stride_per_key()[:index]),
                    sum(self.stride_per_key()[: index + 1]),
                )
                for index in range(len(self._keys))
            ]
        )
        + "\n})\n"
    )
def pin_memory(self) -> "KeyedJaggedTensor":
    """Return a copy of this KJT whose tensors live in pinned host memory.

    Per-key caches are carried over except the jt_dict cache, which is dropped.
    """
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    pinned_weights = None if weights is None else weights.pin_memory()
    pinned_lengths = None if lengths is None else lengths.pin_memory()
    pinned_offsets = None if offsets is None else offsets.pin_memory()
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.pin_memory(),
        weights=pinned_weights,
        lengths=pinned_lengths,
        offsets=pinned_offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        jt_dict=None,
        inverse_indices=inverse_indices,
    )
def dist_labels(self) -> List[str]:
    """Names of the tensors produced by dist_tensors(), in matching order."""
    names = ["lengths", "values"]
    if self.variable_stride_per_key():
        names.append("strides")
    if self.weights_or_none() is not None:
        names.append("weights")
    return names
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    """Per-tensor split sizes for comms, aligned with dist_labels()."""
    stride_sums = _sum_by_splits(self.stride_per_key(), key_splits)
    length_sums = _sum_by_splits(self.length_per_key(), key_splits)
    result = [stride_sums, length_sums]
    if self.variable_stride_per_key():
        result.append(key_splits)
    if self.weights_or_none() is not None:
        # Weights are split exactly like values.
        result.append(length_sums)
    return result
def dist_tensors(self) -> List[torch.Tensor]:
    """Tensors to exchange during distribution, aligned with dist_labels()."""
    result = [self.lengths(), self.values()]
    if self.variable_stride_per_key():
        strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
        result.append(strides)
    if self.weights_or_none() is not None:
        result.append(self.weights())
    return result
def dist_init(
    keys: List[str],
    tensors: List[torch.Tensor],
    variable_stride_per_key: bool,
    num_workers: int,
    recat: Optional[torch.Tensor],
    stride_per_rank: Optional[List[int]],
    stagger: int = 1,
) -> "KeyedJaggedTensor":
    """Reassemble a KJT from all-to-all output tensors.

    `tensors` is laid out as [lengths, values, (strides if variable),
    (weights if weighted)] — matching dist_labels()/dist_tensors() on the
    sending side. `recat` (when non-empty) permutes the received data back
    into per-key order; `stagger` re-interleaves ranks for staggered shuffles.
    """
    assert len(tensors) in [2, 3, 4]
    lengths = tensors[0]
    values = tensors[1]
    stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
    # weights are last, present only when the tensor count accounts for them.
    weights = (
        tensors[-1]
        if (variable_stride_per_key and len(tensors) == 4)
        or (not variable_stride_per_key and len(tensors) == 3)
        else None
    )

    if variable_stride_per_key:
        assert stride_per_rank_per_key is not None
        # Received strides are rank-major; transpose to key-major lists.
        stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
            num_workers, len(keys)
        ).T.tolist()

        strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
            stride_per_rank_per_key
        ).tolist()

        cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)

        # Per-(rank,key) segment value counts, derived from lengths cumsum
        # sampled at segment boundaries.
        length_per_key = (
            cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
        )

        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                lengths, _ = _permute_tensor_by_segments(
                    lengths,
                    stride_per_rank_per_key,
                    recat,
                    None,
                )
                values, weights = _permute_tensor_by_segments(
                    values,
                    length_per_key,
                    recat,
                    weights,
                )

        if not stride_per_key_per_rank:
            stride_per_key_per_rank = [[0]] * len(keys)
        if stagger > 1:
            # Undo the staggered rank ordering: within each key, regroup rank
            # entries so ranks of the same local group become contiguous.
            stride_per_key_per_rank_stagger: List[List[int]] = []
            local_world_size = num_workers // stagger
            for i in range(len(keys)):
                stride_per_rank_stagger: List[int] = []
                for j in range(local_world_size):
                    stride_per_rank_stagger.extend(
                        stride_per_key_per_rank[i][j::local_world_size]
                    )
                stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
            stride_per_key_per_rank = stride_per_key_per_rank_stagger

        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
        return kjt.sync()
    else:
        assert stride_per_rank is not None
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                stride = stride_per_rank[0]

                if all(s == stride for s in stride_per_rank):
                    # Uniform batch size per rank: lengths form a 2D grid, so
                    # the fused 2D permute applies.
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_2D_sparse_data(
                        recat,
                        lengths.view(-1, stride),
                        values,
                        weights,
                        values.numel(),
                    )
                    lengths = lengths.view(-1)
                else:  # variable batch size per rank
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_1D_sparse_data(
                        recat,
                        lengths.view(-1),
                        values,
                        weights,
                        values.numel(),
                    )
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=sum(stride_per_rank),
        )
        return kjt.sync()
def output_jt_dict(
    sharding_types: List[str],
    emb_per_sharding: List[List[torch.Tensor]],
    features_per_sharding: List[KJTList],
    embedding_names_per_rank_per_sharding: List[List[List[str]]],
    need_indices: bool,
    features_before_input_dist_per_sharding: List[KeyedJaggedTensor],
    features_to_permute_indices: Dict[str, torch.Tensor],
    unbucketize_tensors: List[torch.Tensor],
    unbucketize_tensor_idxs_per_sharding: List[int],
    key_to_feature_permuted_coordinates_per_sharding: List[Dict[str, torch.Tensor]],
) -> Dict[str, JaggedTensor]:
    """Assemble per-feature JaggedTensors from per-sharding embedding outputs.

    All `*_per_sharding` arguments are parallel lists (one entry per sharding
    type); each sharding's outputs are converted via `_construct_jagged_tensors`
    and merged into one feature-name -> JaggedTensor dict. An unbucketize
    tensor index of -1 means the sharding has no row-wise unbucketization.
    """
    jt_dict: Dict[str, JaggedTensor] = {}
    # Walk the parallel per-sharding lists in lockstep.
    for (
        sharding_type,
        emb_sharding,
        features_sharding,
        embedding_names_per_rank,
        unbucketize_tensor_idx,
        features_before_input_dist,
        key_to_feature_permuted_coordinates,
    ) in zip(
        sharding_types,
        emb_per_sharding,
        features_per_sharding,
        embedding_names_per_rank_per_sharding,
        unbucketize_tensor_idxs_per_sharding,
        features_before_input_dist_per_sharding,
        key_to_feature_permuted_coordinates_per_sharding,
    ):
        jt_dict.update(
            _construct_jagged_tensors(
                sharding_type=sharding_type,
                embeddings=emb_sharding,
                features=features_sharding,
                embedding_names_per_rank=embedding_names_per_rank,
                features_before_input_dist=features_before_input_dist,
                need_indices=need_indices,
                rw_unbucketize_tensor=(
                    unbucketize_tensors[unbucketize_tensor_idx]
                    if unbucketize_tensor_idx != -1
                    else None
                ),
                cw_features_to_permute_indices=features_to_permute_indices,
                key_to_feature_permuted_coordinates=key_to_feature_permuted_coordinates,
            )
        )
    return jt_dict
from typing import Any, Dict, Iterable, Optional
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
class TBEToRegisterMixIn:
    """Mix-in for sharded modules that expose their fbgemm TBE kernels for registration.

    Implementors return a mapping from each owned
    ``IntNBitTableBatchedEmbeddingBagsCodegen`` instance to the
    ``GroupedEmbeddingConfig`` it serves.
    """

    def get_tbes_to_register(
        self,
    ) -> Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig]:
        # Abstract hook: subclasses must override.
        raise NotImplementedError
class GroupedEmbeddingConfig:
    """Configuration for a group of embedding tables fused into a single lookup.

    All accessors below flatten per-table information, repeating table-level
    values once per feature where appropriate.
    """

    data_type: DataType
    pooling: PoolingType
    is_weighted: bool
    has_feature_processor: bool
    compute_kernel: EmbeddingComputeKernel
    embedding_tables: List[ShardedEmbeddingTable]
    fused_params: Optional[Dict[str, Any]] = None

    def feature_hash_sizes(self) -> List[int]:
        """Hash size of each feature (table's row count, repeated per feature)."""
        return [
            table.num_embeddings
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def num_features(self) -> int:
        """Total feature count across all grouped tables."""
        return sum(table.num_features() for table in self.embedding_tables)

    def dim_sum(self) -> int:
        """Sum of output dimensions contributed by every feature."""
        return sum(
            table.num_features() * table.local_cols
            for table in self.embedding_tables
        )

    def table_names(self) -> List[str]:
        """Names of the grouped tables, in group order."""
        return [table.name for table in self.embedding_tables]

    def feature_names(self) -> List[str]:
        """Flattened feature names across all tables."""
        return [
            name for table in self.embedding_tables for name in table.feature_names
        ]

    def embedding_dims(self) -> List[int]:
        """Per-feature embedding dimension (owning table's local columns)."""
        return [
            table.local_cols
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def embedding_names(self) -> List[str]:
        """Flattened embedding output names across all tables."""
        return [
            name for table in self.embedding_tables for name in table.embedding_names
        ]

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Owning table's shard metadata, repeated once per feature."""
        return [
            table.local_metadata
            for table in self.embedding_tables
            for _ in table.feature_names
        ]
def get_tbes_to_register_from_iterable(
    iterable: Iterable[torch.nn.Module],
) -> Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig]:
    """Collect TBE registrations from every TBEToRegisterMixIn module in *iterable*.

    Modules that do not implement the mix-in are skipped; later modules
    overwrite earlier entries for the same TBE instance.
    """
    registrations: Dict[
        IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig
    ] = {}
    mixins = (module for module in iterable if isinstance(module, TBEToRegisterMixIn))
    for mixin in mixins:
        registrations.update(mixin.get_tbes_to_register())
    return registrations
from typing import Any, Dict, Iterable, Optional
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
# Fused-param key: when truthy, TBE modules are registered as named modules.
FUSED_PARAM_REGISTER_TBE_BOOL: str = "__register_tbes_in_named_modules"


def is_fused_param_register_tbe(fused_params: Optional[Dict[str, Any]]) -> bool:
    """Return True iff ``fused_params`` asks for TBEs to be registered as named modules.

    Fix: the previous ``and``-chain leaked ``None``, ``{}`` or the raw stored
    value to callers despite the ``-> bool`` annotation; coerce with ``bool``.
    Truthiness semantics for callers are unchanged.
    """
    return bool(fused_params and fused_params.get(FUSED_PARAM_REGISTER_TBE_BOOL))
from typing import Any, Dict, Iterable, Optional
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
# Fused-param key: when truthy, quantized state_dict splits scale/bias out.
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS: str = (
    "__register_quant_state_dict_split_scale_bias"
)


def is_fused_param_quant_state_dict_split_scale_bias(
    fused_params: Optional[Dict[str, Any]]
) -> bool:
    """Return True iff ``fused_params`` asks for split scale/bias in quantized state_dict.

    Fix: the previous ``and``-chain could return ``None``, ``{}`` or the raw
    stored value despite the ``-> bool`` annotation; coerce with ``bool``.
    Truthiness semantics for callers are unchanged.
    """
    return bool(
        fused_params
        and fused_params.get(FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS)
    )
from typing import Any, Dict, Iterable, Optional
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
FUSED_PARAM_REGISTER_TBE_BOOL: str = "__register_tbes_in_named_modules"
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS: str = (
    "__register_quant_state_dict_split_scale_bias"
)

# Keys that configure torchrec-side behavior only and must not be forwarded
# to the fbgemm TBE kernel constructor.
_NON_TBE_FUSED_PARAM_KEYS = (
    FUSED_PARAM_REGISTER_TBE_BOOL,
    FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)


def tbe_fused_params(
    fused_params: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
    """Return a copy of ``fused_params`` with torchrec-only keys stripped.

    Returns ``None`` for ``None`` or empty input. The input dict is never
    mutated.
    """
    if not fused_params:
        return None
    fused_params_for_tbe = dict(fused_params)
    for key in _NON_TBE_FUSED_PARAM_KEYS:
        # pop with a default avoids the separate membership pre-check.
        fused_params_for_tbe.pop(key, None)
    return fused_params_for_tbe
import logging
import os
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
logger: logging.Logger = logging.getLogger(__name__)
_INTRA_PG: Optional[dist.ProcessGroup] = None
_CROSS_PG: Optional[dist.ProcessGroup] = None
def get_local_size(world_size: Optional[int] = None) -> int:
    """
    Gets the local world size (see https://pytorch.org/docs/stable/elastic/run.html)
    This is usually the size of workers on each node, or nproc_per_node
    """
    # Fix: this docstring previously sat after the first statement, where it
    # was a no-op string expression rather than the function's docstring.
    if world_size is None:
        world_size = dist.get_world_size()
    # Probe the launcher-specific environment variables (torchelastic, MPI
    # flavors); -1/8 fallback semantics come from _env2int.
    local_size = _env2int(
        [
            "LOCAL_WORLD_SIZE",
            "MPI_LOCALNRANKS",
            "OMPI_COMM_WORLD_LOCAL_SIZE",
            "MV2_COMM_WORLD_LOCAL_SIZE",
        ],
        8,
    )
    # A local size that does not divide the world size cannot be right;
    # degrade to a single-node assumption.
    if local_size == -1 or world_size % local_size != 0:
        logging.warning(
            "Could not determine LOCAL_WORLD_SIZE from environment, falling back to WORLD_SIZE."
        )
        local_size = world_size
    return local_size
def get_local_rank(world_size: Optional[int] = None, rank: Optional[int] = None) -> int:
    """Return this worker's rank within its node.

    See https://pytorch.org/docs/stable/elastic/run.html; usually equal to the
    ``LOCAL_RANK`` environment variable, with a ``GLOBAL_RANK % LOCAL_SIZE``
    fallback when the environment gives nothing usable.
    """
    env_local_rank = _env2int(
        [
            "LOCAL_RANK",
            "MPI_LOCALRANKID",
            "OMPI_COMM_WORLD_LOCAL_RANK",
            "MV2_COMM_WORLD_LOCAL_RANK",
        ],
        -1,
    )
    local_size = get_local_size(world_size)
    if env_local_rank != -1 and env_local_rank < local_size:
        return env_local_rank
    # Environment gave no usable local rank; derive it from the global rank.
    logging.warning(
        "Could not determine LOCAL_RANK from environment, falling back to GLOBAL_RANK % LOCAL_SIZE."
    )
    if rank is None:
        rank = dist.get_rank()
    return rank % local_size
def get_group_rank(world_size: Optional[int] = None, rank: Optional[int] = None) -> int:
    """Return the index of this worker's group (node).

    Equivalent to the ``GROUP_RANK`` environment variable; a number between 0
    and ``get_num_groups() - 1``
    (see https://pytorch.org/docs/stable/elastic/run.html).
    """
    effective_rank = dist.get_rank() if rank is None else rank
    return effective_rank // get_local_size(world_size)
def get_num_groups(world_size: Optional[int] = None) -> int:
    """Return the number of worker groups (usually the node count, max_nnodes).

    See https://pytorch.org/docs/stable/elastic/run.html.
    """
    effective_world_size = (
        dist.get_world_size() if world_size is None else world_size
    )
    return effective_world_size // get_local_size(effective_world_size)
The provided code snippet includes the necessary dependencies for implementing the `intra_and_cross_node_pg` function. Write a Python function `def intra_and_cross_node_pg(device: Optional[torch.device] = None, backend: Optional[str] = None) -> Tuple[Optional[dist.ProcessGroup], Optional[dist.ProcessGroup]]` to solve the following problem:
Create sub-process groups (intra-node and cross-node).
Here is the function:
def intra_and_cross_node_pg(
    device: Optional[torch.device] = None,
    backend: Optional[str] = None,
) -> Tuple[Optional[dist.ProcessGroup], Optional[dist.ProcessGroup]]:
    """
    Creates sub process groups (intra and cross node)

    Returns ``(intra_pg, cross_pg)``: the group of ranks co-located on this
    node and the group of same-local-rank peers across nodes. Results are
    cached in the ``_INTRA_PG`` / ``_CROSS_PG`` module globals, so groups are
    only created on the first call. Meta devices get ``(None, None)``.

    Fix: the final return statement was corrupted by table-dump residue in
    this copy; restored to a clean ``return _INTRA_PG, _CROSS_PG``.
    """
    if device is not None and device.type == "meta":
        return None, None
    global _INTRA_PG  # intra node process group
    global _CROSS_PG  # cross node process group
    my_size = dist.get_world_size()
    my_rank = dist.get_rank()
    my_local_rank = get_local_rank(my_size, my_rank)
    local_size = get_local_size(my_size)
    my_group_rank = get_group_rank(my_size, my_rank)
    group_count = get_num_groups(my_size)
    if backend is None:
        backend = dist.get_backend()
    logger.info(
        f"[{my_rank}] my_local_rank = {my_local_rank}, local_size = {local_size},"
        f"my_group_rank = {my_group_rank}, group_count = {group_count}, backend = {backend}"
    )
    if _INTRA_PG is None:
        # dist.new_group is collective: every rank must create every group in
        # the same order, keeping only the group it belongs to.
        for group_rank in range(group_count):
            peers = [group_rank * local_size + r for r in range(local_size)]
            curr_intra_group_pg = dist.new_group(backend=backend, ranks=peers)
            if my_group_rank == group_rank:
                logger.warning(
                    "[Connection] intra_group: [%d] -> [%s]" % (my_rank, peers)
                )
                _INTRA_PG = curr_intra_group_pg
        dist.barrier()
    if _CROSS_PG is None:
        # Same collective-order requirement as above.
        for l_rank in range(local_size):
            peers = [l_rank + g * local_size for g in range(group_count)]
            curr_cross_group_pg = dist.new_group(backend=backend, ranks=peers)
            if l_rank == my_local_rank:
                logger.warning(
                    "[Connection] cross_group: [%d] -> [%s]" % (my_rank, peers)
                )
                _CROSS_PG = curr_cross_group_pg
        dist.barrier()
    return _INTRA_PG, _CROSS_PG
from functools import partial
from typing import Any, Dict, Iterator, List, Optional, Type, Union
import torch
from torch import nn
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionContext,
EmbeddingBagCollectionSharder,
ShardedEmbeddingBagCollection,
)
from torchrec.distributed.types import (
Awaitable,
LazyAwaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardingEnv,
ShardingType,
)
from torchrec.distributed.utils import append_prefix, init_parameters
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
apply_feature_processors_to_kjt,
FeatureProcessedEmbeddingBagCollection,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# pyre-ignore
class KeyedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """
    KeyedTensor holds a concatenated list of dense tensors, each of which can be
    accessed by a key.

    The keyed dimension can be of variable length (length_per_key). Common use
    cases include storage of pooled embeddings of different dimensions.
    Implementation is torch.jit.script-able.

    Args:
        keys (List[str]): list of keys.
        length_per_key (List[int]): length of each key along key dimension.
        values (torch.Tensor): dense tensor, concatenated typically along key dimension.
        key_dim (int): key dimension, zero indexed - defaults to 1
            (typically B is 0-dimension).
        offset_per_key (Optional[List[int]]): cached cumulative offsets per key.
        index_per_key (Optional[Dict[str, int]]): cached key -> position index.

    Example::

        tensor_list = [
            torch.tensor([[1,1]] * 3),
            torch.tensor([[2,1,2]] * 3),
            torch.tensor([[3,1,2,3]] * 3),
        ]
        keys = ["Embedding A", "Embedding B", "Embedding C"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list)
        kt["Embedding B"]
        # tensor([[2, 1, 2], [2, 1, 2], [2, 1, 2]])
    """

    def __init__(
        self,
        keys: List[str],
        length_per_key: List[int],
        values: torch.Tensor,
        key_dim: int = 1,
        # Below exposed to ensure torch.script-able
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
    ) -> None:
        self._keys = keys
        self._length_per_key = length_per_key
        self._values = values
        self._key_dim = key_dim
        # Lazily-computed caches; filled by offset_per_key()/_key_indices().
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key

    # NOTE(review): no `self` parameter — upstream this is a @staticmethod;
    # the decorator appears stripped in this copy. Call it via the class.
    def from_tensor_list(
        keys: List[str], tensors: List[torch.Tensor], key_dim: int = 1, cat_dim: int = 1
    ) -> "KeyedTensor":
        # Build a KeyedTensor by concatenating per-key tensors along cat_dim.
        length_per_key = [tensor.shape[key_dim] for tensor in tensors]
        return KeyedTensor(
            keys=keys,
            length_per_key=length_per_key,
            values=torch.cat(tensors, dim=cat_dim),
            key_dim=key_dim,
        )

    def keys(self) -> List[str]:
        # Ordered list of keys.
        return self._keys

    def values(self) -> torch.Tensor:
        # Underlying concatenated dense tensor.
        return self._values

    def key_dim(self) -> int:
        # Dimension along which keys are concatenated.
        return self._key_dim

    def offset_per_key(self) -> List[int]:
        # Cumulative start offsets per key; computed once and cached.
        _offset_per_key = _maybe_compute_offset_per_key_kt(
            self._length_per_key,
            self._offset_per_key,
        )
        self._offset_per_key = _offset_per_key
        return _offset_per_key

    def length_per_key(self) -> List[int]:
        # Length of each key's slice along key_dim.
        return self._length_per_key

    def _key_indices(self) -> Dict[str, int]:
        # key -> position map; computed once and cached.
        _index_per_key = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key

    def __getitem__(self, key: str) -> torch.Tensor:
        """Return the slice of values belonging to *key* (a view, via narrow)."""
        index = self._key_indices()[key]
        start = self.offset_per_key()[index]
        length = self._length_per_key[index]
        return self._values.narrow(dim=self._key_dim, start=start, length=length)

    def to_dict(self) -> Dict[str, torch.Tensor]:
        """Split values into a {key: tensor} dict along key_dim."""
        indices = self._key_indices()
        lengths = self._length_per_key
        split_values = self._values.split(lengths, dim=self._key_dim)
        return {key: split_values[index] for (key, index) in indices.items()}

    # NOTE(review): no `self` — likely @staticmethod upstream; call via class.
    def regroup(
        keyed_tensors: List["KeyedTensor"], groups: List[List[str]]
    ) -> List[torch.Tensor]:
        # Re-slice multiple KeyedTensors into one tensor per requested group.
        return _regroup_keyed_tensors(keyed_tensors, groups)

    # NOTE(review): no `self` — likely @staticmethod upstream; call via class.
    def regroup_as_dict(
        keyed_tensors: List["KeyedTensor"], groups: List[List[str]], keys: List[str]
    ) -> Dict[str, torch.Tensor]:
        # Same as regroup(), but labels each group tensor with the given key.
        assert len(groups) == len(keys), "Groups and keys should have same length"
        embeddings_list = _regroup_keyed_tensors(keyed_tensors, groups)
        embeddings_dict: Dict[str, torch.Tensor] = {}
        for i, key in enumerate(keys):
            embeddings_dict[key] = embeddings_list[i]
        return embeddings_dict

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # CUDA caching-allocator safety: mark values as in use on `stream`.
        self._values.record_stream(stream)

    def to(self, device: torch.device, non_blocking: bool = False) -> "KeyedTensor":
        """Return a copy with values moved to *device*; caches are carried over."""
        return KeyedTensor(
            keys=self._keys,
            length_per_key=self._length_per_key,
            values=self._values.to(device, non_blocking=non_blocking),
            key_dim=self._key_dim,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
        )

    def __str__(self) -> str:
        if len(self._keys) == 0:
            return "KeyedTensor()\n"
        return (
            "KeyedTensor({\n"
            + ",\n".join(
                [
                    ' "{}": '.format(key) + _keyed_values_string(self[key])
                    for key in self._keys
                ]
            )
            + "\n})\n"
        )
def param_dp_sync(kt: KeyedTensor, no_op_tensor: torch.Tensor) -> KeyedTensor:
    """In-place add *no_op_tensor* to kt's values and return the same KeyedTensor."""
    values = kt._values
    values.add_(no_op_tensor)
    return kt
import copy
import logging
import warnings
from collections import defaultdict, deque, OrderedDict
from dataclasses import dataclass, field
from itertools import accumulate
from typing import Any, cast, Dict, List, MutableMapping, Optional, Type, Union
import torch
from torch import nn
from torch.autograd.profiler import record_function
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
ShardingType,
)
from torchrec.distributed.sharding.cw_sequence_sharding import (
CwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.dp_sequence_sharding import (
DpSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import RwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
TwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
LazyAwaitable,
Multistreamable,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
)
from torchrec.modules.embedding_configs import (
EmbeddingConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingCollection,
EmbeddingCollectionInterface,
)
from torchrec.modules.utils import construct_jagged_tensors
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# Module-level flag for embedding-collection index deduplication (deprecated
# in favor of EmbeddingCollectionSharder(use_index_dedup=...)).
EC_INDEX_DEDUP: bool = False


def set_ec_index_dedup(val: bool) -> None:
    """Deprecated setter for the module-level index-dedup flag."""
    global EC_INDEX_DEDUP
    # Warn first, then mutate — matches the original call order.
    warnings.warn(
        "Please set use_index_dedup in EmbeddingCollectionSharder during __init__ instead",
        DeprecationWarning,
        stacklevel=2,
    )
    EC_INDEX_DEDUP = val
import copy
import logging
import warnings
from collections import defaultdict, deque, OrderedDict
from dataclasses import dataclass, field
from itertools import accumulate
from typing import Any, cast, Dict, List, MutableMapping, Optional, Type, Union
import torch
from torch import nn
from torch.autograd.profiler import record_function
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
ShardingType,
)
from torchrec.distributed.sharding.cw_sequence_sharding import (
CwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.dp_sequence_sharding import (
DpSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import RwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
TwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
LazyAwaitable,
Multistreamable,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
)
from torchrec.modules.embedding_configs import (
EmbeddingConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingCollection,
EmbeddingCollectionInterface,
)
from torchrec.modules.utils import construct_jagged_tensors
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# Module-level flag for embedding-collection index deduplication.
EC_INDEX_DEDUP: bool = False


def get_ec_index_dedup() -> bool:
    """Return the module-level embedding-collection index-dedup flag."""
    # Reading a module-level name needs no `global` declaration; the previous
    # one was a no-op for this read-only access.
    return EC_INDEX_DEDUP
import copy
import logging
import warnings
from collections import defaultdict, deque, OrderedDict
from dataclasses import dataclass, field
from itertools import accumulate
from typing import Any, cast, Dict, List, MutableMapping, Optional, Type, Union
import torch
from torch import nn
from torch.autograd.profiler import record_function
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
ShardingType,
)
from torchrec.distributed.sharding.cw_sequence_sharding import (
CwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.dp_sequence_sharding import (
DpSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import RwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
TwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
LazyAwaitable,
Multistreamable,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
)
from torchrec.modules.embedding_configs import (
EmbeddingConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingCollection,
EmbeddingCollectionInterface,
)
from torchrec.modules.utils import construct_jagged_tensors
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# Best-effort load of the fbgemm sparse ops (Buck target paths, fb-internal).
# A failed load is deliberately swallowed — in other builds these ops are
# presumably registered by importing fbgemm_gpu instead.  # NOTE(review): confirm
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
class EmbeddingSharding(abc.ABC, Generic[C, F, T, W], FeatureShardingMixIn):
    """
    Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
    table_wise.
    """

    # NOTE(review): several methods below have bare `pass` bodies and look like
    # abstract hooks; upstream they are presumably decorated (@abstractmethod /
    # @property) — decorators appear stripped in this copy. Confirm.
    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        self._qcomm_codecs_registry = qcomm_codecs_registry

    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # Accessor for the codecs registry passed at construction.
        return self._qcomm_codecs_registry

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[F]:
        # Hook: build the module distributing sparse features to ranks.
        pass

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[C, T, W]:
        # Hook: build the module distributing looked-up embeddings back.
        pass

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[F, T]:
        # Hook: build the embedding lookup module.
        pass

    def embedding_dims(self) -> List[int]:
        # Hook: per-embedding output dimensions.
        pass

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        # Hook: shard metadata per embedding.
        pass

    def embedding_names(self) -> List[str]:
        # Hook: flattened embedding output names.
        pass

    def embedding_names_per_rank(self) -> List[List[str]]:
        # Hook: embedding names grouped by owning rank.
        pass

    def embedding_tables(self) -> List[ShardedEmbeddingTable]:
        # Optional hook: not all shardings expose their tables.
        raise NotImplementedError

    def uncombined_embedding_dims(self) -> List[int]:
        # Default: identical to the combined dims.
        return self.embedding_dims()

    def uncombined_embedding_names(self) -> List[str]:
        # Default: identical to the combined names.
        return self.embedding_names()
class EmbeddingShardingInfo:
    """Bundles one embedding table's config, its sharding spec, the parameter
    tensor itself, and optional fused-kernel params.

    NOTE(review): bare annotated fields with a defaulted tail suggest this is
    a @dataclass upstream; the decorator appears stripped in this copy —
    confirm before instantiating directly.
    """

    embedding_config: EmbeddingTableConfig
    param_sharding: ParameterSharding
    param: torch.Tensor
    fused_params: Optional[Dict[str, Any]] = None
class CwSequenceEmbeddingSharding(
    BaseCwEmbeddingSharding[
        SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """Column-wise sharding for sequence (unpooled) embeddings: each table is
    partitioned along its columns and the slices are placed on chosen ranks."""

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        """Build the sparse-feature input distributor (table-wise A2A layout)."""
        assert self._pg is not None
        return TwSparseFeaturesDist(self._pg, self.features_per_rank())

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        """Build the grouped embedding lookup; feature processors are unsupported."""
        assert feature_processor is None
        target_device = self._device if device is None else device
        return GroupedEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=target_device,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[SequenceShardingContext, torch.Tensor, torch.Tensor]:
        """Build the sequence-embedding output distributor."""
        assert self._pg is not None
        target_device = self._device if device is None else device
        return TwSequenceEmbeddingDist(
            self._pg,
            self.features_per_rank(),
            target_device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class DpSequenceEmbeddingSharding(
    BaseDpEmbeddingSharding[
        SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """Data-parallel sharding for sequence (unpooled) embeddings: every rank
    holds a full replica of each table, so no table sharding occurs."""

    def create_input_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        """Features stay local under data parallelism; return the no-op dist."""
        return DpSparseFeaturesDist()

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        """Build the grouped embedding lookup; feature processors are unsupported."""
        assert feature_processor is None
        target_device = self._device if device is None else device
        return GroupedEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._env.process_group,
            device=target_device,
        )

    def create_output_dist(
        self, device: Optional[torch.device] = None
    ) -> BaseEmbeddingDist[SequenceShardingContext, torch.Tensor, torch.Tensor]:
        """Embeddings are already local; return the no-op output dist."""
        return DpSequenceEmbeddingDist()
class RwSequenceEmbeddingSharding(
    BaseRwEmbeddingSharding[
        SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """Row-wise sharding for sequence (unpooled) embeddings: each table is
    split evenly by rows with a slice placed on every rank."""

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        """Build the row-wise (bucketizing) sparse-feature distributor."""
        target_device = self._device if device is None else device
        # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
        # `Optional[ProcessGroup]`.
        return RwSparseFeaturesDist(
            pg=self._pg,
            num_features=self._get_num_features(),
            feature_hash_sizes=self._get_feature_hash_sizes(),
            device=target_device,
            is_sequence=True,
            has_feature_processor=self._has_feature_processor,
            need_pos=False,
        )

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        """Build the grouped embedding lookup over this rank's row slices."""
        target_device = self._device if device is None else device
        return GroupedEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=target_device,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[SequenceShardingContext, torch.Tensor, torch.Tensor]:
        """Build the sequence-embedding output distributor."""
        target_device = self._device if device is None else device
        # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
        # `Optional[ProcessGroup]`.
        return RwSequenceEmbeddingDist(
            self._pg,
            self._get_num_features(),
            target_device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class SequenceShardingContext(EmbeddingShardingContext):
    """Carries KJTAllToAll context for reuse by SequenceEmbeddingsAllToAll
    (which shares KJTAllToAll's communication pattern).

    Attributes:
        features_before_input_dist: original KJT before input dist.
        input_splits: input splits of the KJT AllToAll.
        output_splits: output splits of the KJT AllToAll.
        unbucketize_permute_tensor: permute order from KJT bucketization
            (row-wise sharding only).
        lengths_after_input_dist: KJT lengths after input dist.
    """

    # Plain __init__ (not a dataclass): Torch Dynamo cannot handle
    # default_factory=list (https://github.com/pytorch/pytorch/issues/120108).
    def __init__(
        self,
        # Fields of EmbeddingShardingContext
        batch_size_per_rank: Optional[List[int]] = None,
        batch_size_per_rank_per_feature: Optional[List[List[int]]] = None,
        batch_size_per_feature_pre_a2a: Optional[List[int]] = None,
        variable_batch_per_feature: bool = False,
        # Fields of SequenceShardingContext
        features_before_input_dist: Optional[KeyedJaggedTensor] = None,
        input_splits: Optional[List[int]] = None,
        output_splits: Optional[List[int]] = None,
        sparse_features_recat: Optional[torch.Tensor] = None,
        unbucketize_permute_tensor: Optional[torch.Tensor] = None,
        lengths_after_input_dist: Optional[torch.Tensor] = None,
    ) -> None:
        super().__init__(
            batch_size_per_rank,
            batch_size_per_rank_per_feature,
            batch_size_per_feature_pre_a2a,
            variable_batch_per_feature,
        )
        # Keep caller-provided list objects; only substitute a fresh empty
        # list when None was passed.
        if input_splits is None:
            input_splits = []
        if output_splits is None:
            output_splits = []
        self.features_before_input_dist: Optional[KeyedJaggedTensor] = (
            features_before_input_dist
        )
        self.input_splits: List[int] = input_splits
        self.output_splits: List[int] = output_splits
        self.sparse_features_recat: Optional[torch.Tensor] = sparse_features_recat
        self.unbucketize_permute_tensor: Optional[torch.Tensor] = (
            unbucketize_permute_tensor
        )
        self.lengths_after_input_dist: Optional[torch.Tensor] = lengths_after_input_dist

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Mark every tensor-bearing field as in use on *stream* (CUDA
        caching-allocator safety)."""
        maybe_recordable = (
            self.features_before_input_dist,
            self.sparse_features_recat,
            self.unbucketize_permute_tensor,
            self.lengths_after_input_dist,
        )
        for item in maybe_recordable:
            if item is not None:
                item.record_stream(stream)
class TwSequenceEmbeddingSharding(
    BaseTwEmbeddingSharding[
        SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
    ]
):
    """Table-wise sharding for sequence (unpooled) embeddings: each table is
    placed entirely on a single selected rank."""

    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        """Build the table-wise sparse-feature distributor."""
        # pyre-fixme[6]: For 1st param expected `ProcessGroup` but got
        # `Optional[ProcessGroup]`.
        return TwSparseFeaturesDist(self._pg, self.features_per_rank())

    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup:
        """Build the grouped embedding lookup; feature processors are unsupported."""
        assert feature_processor is None
        target_device = self._device if device is None else device
        return GroupedEmbeddingsLookup(
            grouped_configs=self._grouped_embedding_configs,
            pg=self._pg,
            device=target_device,
        )

    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[SequenceShardingContext, torch.Tensor, torch.Tensor]:
        """Build the sequence-embedding output distributor."""
        assert self._pg is not None
        target_device = self._device if device is None else device
        return TwSequenceEmbeddingDist(
            self._pg,
            self.features_per_rank(),
            target_device,
            qcomm_codecs_registry=self.qcomm_codecs_registry,
        )
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): likely a @dataclass upstream (decorator appears stripped
    # in this copy); as written, each default below is a single instance
    # shared by every use of the class — confirm.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class ShardingEnv:
    """
    Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
    enables `DistributedModelParallel` to be used during inference.
    """

    def __init__(
        self,
        world_size: int,
        rank: int,
        pg: Optional[dist.ProcessGroup] = None,
    ) -> None:
        # Total number of ranks participating in sharding.
        self.world_size = world_size
        # This process's rank (expected in [0, world_size)).
        self.rank = rank
        # None for process-group-free setups (e.g. single-host inference).
        self.process_group: Optional[dist.ProcessGroup] = pg

    # NOTE(review): takes `cls` — upstream this is a @classmethod; the
    # decorator appears stripped in this copy, so invoke via the class.
    def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
        """
        Creates ProcessGroup-based sharding environment.

        NOTE:
            Typically used during training.
        """
        return cls(dist.get_world_size(pg), dist.get_rank(pg), pg)

    # NOTE(review): takes `cls` — same stripped-@classmethod situation as above.
    def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
        """
        Creates a local host-based sharding environment.

        NOTE:
            Typically used during single host inference.
        """
        return cls(world_size, rank, None)
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """Represents an (optionally weighted) keyed jagged tensor.
    A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is a dimension
    whose slices may be of different lengths. Keyed on first dimension and jagged on
    the last dimension.
    Implementation is torch.jit.script-able.
    Args:
        keys (List[str]): keys to the jagged Tensor.
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
            same shape as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
        stride (Optional[int]): number of examples per batch.
        stride_per_key_per_rank (Optional[List[List[int]]]): batch size
            (number of examples) per key per rank, with the outer list representing the
            keys and the inner list representing the values.
            Each value in the inner list represents the number of examples in the batch
            from the rank of its index in a distributed context.
        length_per_key (Optional[List[int]]): start length for each key.
        offset_per_key (Optional[List[int]]): start offset for each key and final
            offset.
        index_per_key (Optional[Dict[str, int]]): index for each key.
        jt_dict (Optional[Dict[str, JaggedTensor]]): pre-computed per-key
            JaggedTensor view (cache for `to_dict`).
        inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
            expand deduplicated embedding output for variable stride per key.
    Example::
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        dim_0: keyed dimension (i.e. `Feature0`, `Feature1`)
        dim_1: optional second dimension (i.e. batch size)
        dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
        # We represent this data with following inputs:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    """
    # This is the subset of fields on KJT which are required (all other fields
    # can be derived from these fields, and are only cached)
    _fields = [
        "_values",
        "_weights",
        "_lengths",
        "_offsets",
    ]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """Build a KJT from its dense components; see the class docstring for field
        semantics. Exactly one of `stride` / `stride_per_key_per_rank` may be given.
        """
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty) -- validated eagerly.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-batch-size-per-key mode.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            # Per-key stride is the sum of that key's per-rank batch sizes.
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys share the same stride, so a scalar stride is well-defined;
                # otherwise self._stride stays -1.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-stride mode: infer the stride from lengths/offsets if not given.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields (computed on demand and cached)
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate multiple KJTs along the key dimension.

        All inputs must agree on weightedness, on variable-vs-fixed stride,
        and (in fixed-stride mode) on the stride itself.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        # length_per_key is only propagated if *every* input has it cached.
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                # One missing cache disables length_per_key on the result.
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.
        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case we have some batch without a feature value,
        the input JaggedTensor could just not include any values.
        But KeyedJaggedTensor (by default) typically pads "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.
        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        Notice that the inputs for this KeyedJaggedTensor would have looked like:
            values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
            weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
            lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
            offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
            keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
            index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
            offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.
        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1; this violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        # Per-key stride is inferred from each JaggedTensor's lengths size.
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # If every key shares the same stride use a scalar stride; otherwise
        # fall back to variable-stride mode (one rank per key).
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly compute and cache length_per_key/offset_per_key; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached length_per_key/offset_per_key; returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Device of the values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Jagged slice lengths, derived from offsets if not stored; cached."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Stored lengths, without triggering computation."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Cumulative offsets, derived from lengths if not stored; cached."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Stored offsets, without triggering computation."""
        return self._offsets
    def keys(self) -> List[str]:
        """Feature keys."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Dense values tensor."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; raises if the KJT is unweighted."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor or None if unweighted."""
        return self._weights
    def stride(self) -> int:
        """Batch size (examples per batch); -1 when not uniform across keys."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (sum over ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Batch size per key per rank (outer list: keys; inner: ranks)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """Whether this KJT was built in variable-stride-per-key mode."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices for dedup expansion; raises if absent."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices or None."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Key -> position mapping, computed lazily and cached."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values belonging to each key; computed lazily and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached length_per_key, without triggering computation."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key's values plus the final offset; cached.

        Also refreshes the length_per_key cache as a side effect.
        """
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached offset_per_key, without triggering computation."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative sum of stride_per_key; indexes into the lengths tensor."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT along the key dimension into consecutive groups of
        `segments[i]` keys each.

        Fast paths: a segment covering all keys reuses the underlying tensors
        unchanged; an empty segment produces an empty KJT. Otherwise values,
        weights and lengths are sliced by offset.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Empty segment: build a KJT with empty tensors on this device.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # General case: slice values/weights by value offsets and
                # lengths by the per-key lengths offsets.
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Reorder keys according to `indices`, permuting values/weights/lengths
        to match.

        Args:
            indices: target key order, as positions into the current key list.
            indices_tensor: optional pre-built device tensor of `indices`
                (built here if not supplied).
            include_inverse_indices: whether to carry inverse indices over to
                the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable stride: permute lengths by per-key stride segments and
            # values/weights by per-key length segments.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed stride: lengths form a (num_keys, stride) grid, so the
            # fused fbgemm 2D permute kernel handles everything at once.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
    def flatten_lengths(self) -> "KeyedJaggedTensor":
        """Return a copy of this KJT whose lengths tensor is flattened to 1-D;
        other cached/derived fields are dropped except length_per_key."""
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank())
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values,
            weights=self._weights,
            lengths=self.lengths().view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self.length_per_key(),
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=None,
        )
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor for a single key, slicing values/weights by
        the key's value offsets and lengths by its stride offsets."""
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # Guard against index + 1 running off the end of offset_per_key;
        # in that case the slice is empty (end == start).
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Convert this KJT into a per-key Dict[str, JaggedTensor]; the result
        is cached on the instance and reused on subsequent calls."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy of this KJT on `device`, preserving all cached fields.

        Args:
            device: target device.
            non_blocking: forwarded to each tensor's `.to()`.
            dtype: if given, also casts the weights tensor (only weights) to it.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # Only the tensor half of (keys, tensor) needs moving.
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key via `_jagged_tensor_string`, slicing the
        shared offsets tensor by each key's stride range."""
        # NOTE: `and` binds tighter than `or` here -- the empty form is printed
        # when there are no keys, or when neither offsets nor lengths exist.
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """Return a copy of this KJT with all backing tensors pinned in host
        memory; the jt_dict cache is intentionally dropped (its tensors would
        not be pinned)."""
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
    def dist_labels(self) -> List[str]:
        """Names of the tensors shipped in a comms payload, in wire order:
        lengths, values, [strides], [weights]."""
        labels = ["lengths", "values"]
        if self.variable_stride_per_key():
            labels.append("strides")
        if self.weights_or_none() is not None:
            labels.append("weights")
        return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Per-worker split sizes for each payload tensor, matching the order
        of `dist_labels` (weights split like values by length)."""
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            splits.append(length_per_split)
        return splits
    def dist_tensors(self) -> List[torch.Tensor]:
        """The payload tensors themselves, matching the order of `dist_labels`."""
        tensors = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
            tensors.append(strides)
        if self.weights_or_none() is not None:
            tensors.append(self.weights())
        return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """Reassemble a KJT from the tensors received in a collective
        (inverse of `dist_tensors`; see `dist_labels` for the wire order).

        Args:
            keys: the feature keys of the resulting KJT.
            tensors: [lengths, values, (strides,) (weights,)] -- strides only
                in variable-stride mode, weights only for weighted KJTs.
            variable_stride_per_key: whether the payload carries per-key strides.
            num_workers: number of sending ranks.
            recat: optional permutation used to regroup data received
                rank-major back into key-major order; skipped when empty.
            stride_per_rank: per-rank batch sizes (fixed-stride mode only).
            stagger: regrouping factor applied to stride_per_key_per_rank
                when ranks were staggered across hosts.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # weights, when present, is always the last tensor.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Received strides are rank-major; transpose to key-major.
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            # Value count per (rank, key) segment, via differences of cumsums.
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo rank staggering: reorder each key's per-rank strides so
                # they follow the staggered send order.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    if all(s == stride for s in stride_per_rank):
                        # Uniform batch size per rank: 2D permute on the
                        # (segments, stride) lengths grid.
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def create_embedding_sharding(
    sharding_type: str,
    sharding_infos: List[EmbeddingShardingInfo],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
    qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None,
) -> EmbeddingSharding[
    SequenceShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor
]:
    """Factory for sequence-embedding sharding implementations.

    Dispatches on `sharding_type` (a `ShardingType` value string) to the
    table-wise / row-wise / data-parallel / column-wise implementation.

    Raises:
        ValueError: if `sharding_type` is not one of the supported types.
    """
    if sharding_type == ShardingType.TABLE_WISE.value:
        return TwSequenceEmbeddingSharding(
            sharding_infos=sharding_infos,
            env=env,
            device=device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    elif sharding_type == ShardingType.ROW_WISE.value:
        return RwSequenceEmbeddingSharding(
            sharding_infos=sharding_infos,
            env=env,
            device=device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    elif sharding_type == ShardingType.DATA_PARALLEL.value:
        # Data-parallel sharding takes no qcomm codecs registry.
        return DpSequenceEmbeddingSharding(
            sharding_infos=sharding_infos,
            env=env,
            device=device,
        )
    elif sharding_type == ShardingType.COLUMN_WISE.value:
        return CwSequenceEmbeddingSharding(
            sharding_infos=sharding_infos,
            env=env,
            device=device,
            qcomm_codecs_registry=qcomm_codecs_registry,
        )
    else:
        raise ValueError(f"Sharding not supported {sharding_type}")
8,993 | import copy
import logging
import warnings
from collections import defaultdict, deque, OrderedDict
from dataclasses import dataclass, field
from itertools import accumulate
from typing import Any, cast, Dict, List, MutableMapping, Optional, Type, Union
import torch
from torch import nn
from torch.autograd.profiler import record_function
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
KJTListSplitsAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingModule,
ShardingType,
)
from torchrec.distributed.sharding.cw_sequence_sharding import (
CwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.dp_sequence_sharding import (
DpSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import RwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
TwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
EmbeddingModuleShardingPlan,
LazyAwaitable,
Multistreamable,
ParameterSharding,
QuantizedCommCodecs,
ShardedTensor,
ShardingEnv,
ShardMetadata,
)
from torchrec.distributed.utils import (
add_params_from_parameter_sharding,
convert_to_fbgemm_types,
merge_fused_params,
optimizer_type_to_emb_opt_type,
)
from torchrec.modules.embedding_configs import (
EmbeddingConfig,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingCollection,
EmbeddingCollectionInterface,
)
from torchrec.modules.utils import construct_jagged_tensors
from torchrec.optim.fused import EmptyFusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
@dataclass
class EmbeddingShardingInfo:
    """Bundles the per-table inputs needed to shard one embedding table:
    its config, its sharding plan entry, the dense parameter tensor, and
    optional kernel fused params.

    NOTE: restored the `@dataclass` decorator -- without it the annotated
    fields generate no `__init__` and instances cannot be constructed
    field-wise (the file already imports `dataclass`).
    """

    embedding_config: EmbeddingTableConfig
    param_sharding: ParameterSharding
    param: torch.Tensor
    fused_params: Optional[Dict[str, Any]] = None
class EmbeddingComputeKernel(Enum):
    """Compute kernel used for an embedding lookup: dense, fused-optimizer
    (optionally UVM / UVM-caching), or quantized inference variants."""
    DENSE = "dense"
    FUSED = "fused"
    FUSED_UVM = "fused_uvm"
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
@dataclass
class ParameterSharding:
    """
    Describes the sharding of the parameter.

    sharding_type (str): how this parameter is sharded. See ShardingType for well-known
        types.
    compute_kernel (str): compute kernel to be used by this parameter.
    ranks (Optional[List[int]]): rank of each shard.
    sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
    cache_params (Optional[CacheParams]): cache params for embedding lookup.
    enforce_hbm (Optional[bool]): whether to use HBM.
    stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
    bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.

    NOTE:
        ShardingType.TABLE_WISE - rank where this embedding is placed
        ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
        individual tables
        ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
        ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused

    NOTE: restored the `@dataclass` decorator -- without it the annotated
    fields generate no `__init__` (the file already imports `dataclass`).
    """

    sharding_type: str
    compute_kernel: str
    ranks: Optional[List[int]] = None
    sharding_spec: Optional[ShardingSpec] = None
    cache_params: Optional[CacheParams] = None
    enforce_hbm: Optional[bool] = None
    stochastic_rounding: Optional[bool] = None
    bounds_check_mode: Optional[BoundsCheckMode] = None
def optimizer_type_to_emb_opt_type(
    optimizer_class: Type[torch.optim.Optimizer],
) -> Optional[EmbOptimType]:
    """Map a canonical torch / torchrec optimizer class to the corresponding
    FBGEMM embedding optimizer type.

    Raises:
        ValueError: if `optimizer_class` has no known FBGEMM equivalent.
    """
    # TODO add more optimizers to be in parity with ones provided by FBGEMM
    # TODO kwargs accepted by fbgemm and canonical optimizers are different;
    # may need to add special handling for them
    mapping = {
        torch.optim.SGD: EmbOptimType.EXACT_SGD,
        torch.optim.Adagrad: EmbOptimType.EXACT_ADAGRAD,
        torch.optim.Adam: EmbOptimType.ADAM,
        # below are torchrec wrappers over these optims.
        # they accept an **unused kwargs portion, that let us set FBGEMM specific args such as
        # max gradient, etc
        trec_optim.SGD: EmbOptimType.EXACT_SGD,
        trec_optim.LarsSGD: EmbOptimType.LARS_SGD,
        trec_optim.LAMB: EmbOptimType.LAMB,
        trec_optim.PartialRowWiseLAMB: EmbOptimType.PARTIAL_ROWWISE_LAMB,
        trec_optim.Adam: EmbOptimType.ADAM,
        trec_optim.PartialRowWiseAdam: EmbOptimType.PARTIAL_ROWWISE_ADAM,
        trec_optim.Adagrad: EmbOptimType.EXACT_ADAGRAD,
        trec_optim.RowWiseAdagrad: EmbOptimType.EXACT_ROWWISE_ADAGRAD,
    }
    emb_opt_type = mapping.get(optimizer_class)
    # Mapped values are enum members, never None, so None means "unknown".
    if emb_opt_type is None:
        raise ValueError(f"Cannot cast {optimizer_class} to an EmbOptimType")
    return emb_opt_type
def merge_fused_params(
    fused_params: Optional[Dict[str, Any]] = None,
    param_fused_params: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Configure the fused_params including cache_precision if the value is not preset.

    Values set in ``param_fused_params`` (table-level) take precedence over the
    global ``fused_params``.

    Args:
        fused_params (Optional[Dict[str, Any]]): the original fused_params.
        param_fused_params (Optional[Dict[str, Any]]): table-level overrides;
            an "lr" entry is renamed to "learning_rate" (the key FBGEMM expects).

    Returns:
        Dict[str, Any]: a non-null configured fused_params dictionary to be
        used to configure the embedding lookup kernel.
    """
    if fused_params is None:
        fused_params = {}
    # Shallow-copy the overrides so popping "lr" below does not mutate the
    # caller's dictionary (the previous implementation had this side effect).
    param_fused_params = dict(param_fused_params) if param_fused_params else {}
    if "lr" in param_fused_params:
        param_fused_params["learning_rate"] = param_fused_params.pop("lr")
    # Deep-copy the base params so nested values are not shared with the caller,
    # then layer the table-level overrides on top.
    _fused_params = copy.deepcopy(fused_params)
    _fused_params.update(param_fused_params)
    return _fused_params
def add_params_from_parameter_sharding(
    fused_params: Optional[Dict[str, Any]],
    parameter_sharding: ParameterSharding,
) -> Dict[str, Any]:
    """
    Extract params from parameter sharding and then add them to fused_params.

    Params from parameter sharding will override the ones in fused_params if they
    exist already.

    Args:
        fused_params (Optional[Dict[str, Any]]): the existing fused_params
        parameter_sharding (ParameterSharding): the parameter sharding to use

    Returns:
        [Dict[str, Any]]: the fused_params dictionary with params from parameter
            sharding added.
    """
    params = {} if fused_params is None else fused_params
    # Per-sharding values take precedence over anything the sharder provided.
    cache_params = parameter_sharding.cache_params
    if cache_params is not None:
        cache_attr_to_param_key = (
            ("algorithm", "cache_algorithm"),
            ("load_factor", "cache_load_factor"),
            ("reserved_memory", "cache_reserved_memory"),
            ("precision", "cache_precision"),
            ("prefetch_pipeline", "prefetch_pipeline"),
        )
        for attr, param_key in cache_attr_to_param_key:
            value = getattr(cache_params, attr)
            if value is not None:
                params[param_key] = value
    for attr in ("enforce_hbm", "stochastic_rounding", "bounds_check_mode"):
        value = getattr(parameter_sharding, attr)
        if value is not None:
            params[attr] = value
    # Warn when caching params are set but cannot take effect for this
    # sharding type / compute kernel combination.
    if parameter_sharding.sharding_type == ShardingType.DATA_PARALLEL.value:
        logger.warning(
            f"Sharding Type is {parameter_sharding.sharding_type}, "
            "caching params will be ignored"
        )
    elif parameter_sharding.compute_kernel == EmbeddingComputeKernel.DENSE.value:
        logger.warning(
            f"Compute Kernel is {parameter_sharding.compute_kernel}, "
            "caching params will be ignored"
        )
    return params
def convert_to_fbgemm_types(fused_params: Dict[str, Any]) -> Dict[str, Any]:
    """Convert torchrec ``DataType`` entries in ``fused_params`` to FBGEMM sparse types.

    Rewrites (in place) the "cache_precision", "weights_precision" and
    "output_dtype" entries when they hold a ``DataType``; all other entries are
    left untouched. Returns the same dictionary.
    """
    for key in ("cache_precision", "weights_precision", "output_dtype"):
        value = fused_params.get(key)
        if isinstance(value, DataType):
            fused_params[key] = data_type_to_sparse_type(value)
    return fused_params
class PoolingType(Enum):
    """How embeddings within a bag are reduced; NONE applies no pooling."""

    SUM = "SUM"
    MEAN = "MEAN"
    NONE = "NONE"
class EmbeddingTableConfig(BaseEmbeddingConfig):
    """Per-table embedding configuration extending the base config with
    pooling, weighting, and per-table embedding names."""

    # Reduction applied over each bag of embeddings for this table.
    pooling: PoolingType = PoolingType.SUM
    # Whether lookups for this table carry per-sample weights.
    is_weighted: bool = False
    # Whether a feature processor is applied for this table.
    has_feature_processor: bool = False
    # Names of the embeddings produced by this table.
    embedding_names: List[str] = field(default_factory=list)
class EmbeddingCollectionInterface(abc.ABC, nn.Module):
    """
    Interface for `EmbeddingCollection`.
    """
    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> Dict[str, JaggedTensor]:
        """Look up embeddings for ``features``; result is keyed by feature name."""
        pass
    def embedding_configs(
        self,
    ) -> List[EmbeddingConfig]:
        """Return the configs of the embedding tables in this collection."""
        pass
    def need_indices(self) -> bool:
        # NOTE(review): presumably indicates whether output JaggedTensors should
        # retain lookup indices — confirm against implementations.
        pass
    def embedding_dim(self) -> int:
        """Return the embedding dimension."""
        pass
    def embedding_names_by_table(self) -> List[List[str]]:
        """Return, per table, the list of embedding names it produces."""
        pass
def create_sharding_infos_by_sharding(
    module: EmbeddingCollectionInterface,
    table_name_to_parameter_sharding: Dict[str, ParameterSharding],
    fused_params: Optional[Dict[str, Any]],
) -> Dict[str, List[EmbeddingShardingInfo]]:
    """Group per-table ``EmbeddingShardingInfo`` objects by sharding type.

    For each table of ``module``: validates the chosen compute kernel, resolves
    the table's weight tensor, merges the global ``fused_params`` with the
    parameter's optimizer kwargs and the per-sharding overrides, and appends
    the resulting sharding info under the table's sharding type.

    Args:
        module: the embedding collection whose tables are being sharded.
        table_name_to_parameter_sharding: sharding decision per table name;
            must contain an entry for every table of ``module``.
        fused_params: global fused params applied to every table (may be None).

    Returns:
        Mapping from sharding type (str) to the sharding infos of the tables
        using that sharding type.

    Raises:
        ValueError: if a table's compute kernel is not a known
            ``EmbeddingComputeKernel`` member.
    """
    if fused_params is None:
        fused_params = {}
    sharding_type_to_sharding_infos: Dict[str, List[EmbeddingShardingInfo]] = {}
    # state_dict returns parameter.Tensor, which loses parameter level attributes
    parameter_by_name = dict(module.named_parameters())
    # QuantEBC registers weights as buffers (since they are INT8), and so we need to grab it there
    state_dict = module.state_dict()
    for (
        config,
        embedding_names,
    ) in zip(module.embedding_configs(), module.embedding_names_by_table()):
        table_name = config.name
        assert table_name in table_name_to_parameter_sharding
        parameter_sharding = table_name_to_parameter_sharding[table_name]
        if parameter_sharding.compute_kernel not in [
            kernel.value for kernel in EmbeddingComputeKernel
        ]:
            raise ValueError(
                f"Compute kernel not supported {parameter_sharding.compute_kernel}"
            )
        param_name = "embeddings." + config.name + ".weight"
        assert param_name in parameter_by_name or param_name in state_dict
        # Prefer the named parameter (keeps attributes like _optimizer_kwargs);
        # fall back to the state_dict entry for quantized modules.
        param = parameter_by_name.get(param_name, state_dict[param_name])
        if parameter_sharding.sharding_type not in sharding_type_to_sharding_infos:
            sharding_type_to_sharding_infos[parameter_sharding.sharding_type] = []
        optimizer_params = getattr(param, "_optimizer_kwargs", [{}])
        optimizer_classes = getattr(param, "_optimizer_classes", [None])
        assert (
            len(optimizer_classes) == 1 and len(optimizer_params) == 1
        ), f"Only support 1 optimizer, given {len(optimizer_classes)}"
        optimizer_class = optimizer_classes[0]
        optimizer_params = optimizer_params[0]
        if optimizer_class:
            optimizer_params["optimizer"] = optimizer_type_to_emb_opt_type(
                optimizer_class
            )
        # Merge precedence (lowest to highest): global fused_params, optimizer
        # kwargs, per-sharding overrides; then convert enums to FBGEMM types.
        per_table_fused_params = merge_fused_params(fused_params, optimizer_params)
        per_table_fused_params = add_params_from_parameter_sharding(
            per_table_fused_params, parameter_sharding
        )
        per_table_fused_params = convert_to_fbgemm_types(per_table_fused_params)
        sharding_type_to_sharding_infos[parameter_sharding.sharding_type].append(
            (
                EmbeddingShardingInfo(
                    # PoolingType.NONE / is_weighted=False: sequence embeddings
                    # (EmbeddingCollection) are never pooled or weighted.
                    embedding_config=EmbeddingTableConfig(
                        num_embeddings=config.num_embeddings,
                        embedding_dim=config.embedding_dim,
                        name=config.name,
                        data_type=config.data_type,
                        feature_names=copy.deepcopy(config.feature_names),
                        pooling=PoolingType.NONE,
                        is_weighted=False,
                        has_feature_processor=False,
                        embedding_names=embedding_names,
                        weight_init_max=config.weight_init_max,
                        weight_init_min=config.weight_init_min,
                    ),
                    param_sharding=parameter_sharding,
                    param=param,
                    fused_params=per_table_fused_params,
                )
            )
        )
    return sharding_type_to_sharding_infos
8,994 | import copy
from typing import Any, Dict, List, Optional, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding_lookup import EmbeddingComputeKernel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
)
from torchrec.distributed.embeddingbag import (
construct_output_kt,
create_sharding_infos_by_sharding,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sharding import InferCwPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import InferRwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import InferTwEmbeddingSharding
from torchrec.distributed.types import (
NullShardedModuleContext,
NullShardingContext,
ParameterSharding,
ShardingEnv,
ShardingType,
)
from torchrec.distributed.utils import copy_to_device
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingBagConfig,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
FeatureProcessedEmbeddingBagCollection as QuantFeatureProcessedEmbeddingBagCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
    """Fallback stub that always reports that TorchDynamo is not compiling."""
    return False
8,995 | import copy
from typing import Any, Dict, List, Optional, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding_lookup import EmbeddingComputeKernel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
)
from torchrec.distributed.embeddingbag import (
construct_output_kt,
create_sharding_infos_by_sharding,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sharding import InferCwPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import InferRwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import InferTwEmbeddingSharding
from torchrec.distributed.types import (
NullShardedModuleContext,
NullShardingContext,
ParameterSharding,
ShardingEnv,
ShardingType,
)
from torchrec.distributed.utils import copy_to_device
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingBagConfig,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
FeatureProcessedEmbeddingBagCollection as QuantFeatureProcessedEmbeddingBagCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty); validate eagerly.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-stride mode: per-key/per-rank strides are given explicitly
            # and are mutually exclusive with a single global `stride`.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # Every key has the same total stride, so a scalar stride is valid.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-stride mode: derive the stride from lengths/offsets when absent.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields (computed on demand and cached unless provided up front)
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from offsets and eagerly materialize its per-key caches."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            offsets=offsets,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        # sync() fills length_per_key/offset_per_key before the KJT is used.
        return kjt.sync()
    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from lengths and eagerly materialize its per-key caches."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        # sync() fills length_per_key/offset_per_key before the KJT is used.
        return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate KJTs along the key dimension into a single KJT.

        All inputs must agree on weightedness and on whether they use variable
        stride per key; fixed-stride inputs must share the same stride.

        Raises:
            ValueError: on an empty list or mixed weighted/unweighted inputs.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            # The cached length_per_key is only propagated if every input has it.
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Return an empty KJT (no keys, empty tensors) with the given device/dtypes."""
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Return an empty KJT mirroring ``kjt``'s device, dtypes, and stride mode."""
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.

        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.

        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.

        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0

        Notice that the inputs for this KeyedJaggedTensor would have looked like:
            values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
            weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
            lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
            offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
            keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
            index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
            offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.

        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            # Each JT's batch size is the number of rows in its lengths tensor.
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # Uniform batch size -> scalar stride; otherwise fall back to
        # variable stride per key (single-rank layout).
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly compute and cache the per-key length/offset lists; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached per-key length/offset lists (recomputed lazily); returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Device of the values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Per-slice lengths; lazily derived from offsets and cached."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths, or None if neither provided nor computed yet."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Cumulative offsets; lazily derived from lengths and cached."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets, or None if neither provided nor computed yet."""
        return self._offsets
    def keys(self) -> List[str]:
        """Keys of this KJT."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Flat values tensor (all keys concatenated)."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor, asserted present via ``_get_weights_or_throw``."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor, or None for an unweighted KJT."""
        return self._weights
    def stride(self) -> int:
        """Batch size (number of examples per batch)."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Per-key stride, summed over ranks."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Per-key, per-rank batch sizes."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """Whether keys may have differing strides."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices, asserted present via ``_get_inverse_indices_or_throw``."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices, or None if not set."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Key -> position mapping, computed once and cached."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values belonging to each key; computed lazily and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key lengths, or None if not computed yet."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key in ``values`` plus the final end offset; cached."""
        # Helper also returns length_per_key, so both caches are refreshed together.
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or None if not computed yet."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative sum of stride_per_key: each key's starting row in ``lengths``."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split along the key dimension into consecutive KJTs of ``segments[i]`` keys.

        Values/weights/lengths are sliced per segment; cached per-key metadata is
        reused where possible.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # Whole-KJT segment: reuse every tensor and cache directly.
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Zero-key segment: build a KJT of empty tensors on the same device.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # General case: slice values/weights by value offsets and
                # lengths by per-key row offsets.
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Return a new KJT with keys reordered according to ``indices``.

        Args:
            indices: new key order, as positions into ``self.keys()``.
            indices_tensor: optional pre-built device tensor of ``indices``
                (built here if absent).
            include_inverse_indices: whether to carry over inverse indices.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        # Reorder the host-side per-key metadata while summing total values.
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable stride: permute lengths and values segment by segment.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed stride: use the fused FBGEMM 2D permutation kernel.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
    def flatten_lengths(self) -> "KeyedJaggedTensor":
        """Return a KJT sharing this one's tensors but with a flattened (1-D)
        lengths view; offsets and other caches are dropped."""
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank())
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values,
            weights=self._weights,
            lengths=self.lengths().view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self.length_per_key(),
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=None,
        )
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor slice for ``key`` (views into this KJT's tensors).

        Raises:
            KeyError: if ``key`` is not one of this KJT's keys.
        """
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # Defensive fallback: an out-of-range index+1 yields an empty slice.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Return (and cache) a mapping from key to its JaggedTensor slice."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def to(
self,
device: torch.device,
non_blocking: bool = False,
dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
length_per_key = self._length_per_key
offset_per_key = self._offset_per_key
index_per_key = self._index_per_key
jt_dict = self._jt_dict
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (
inverse_indices[0],
inverse_indices[1].to(device, non_blocking=non_blocking),
)
if weights is not None:
if dtype is not None:
weights = weights.to(
dtype=dtype, device=device, non_blocking=non_blocking
)
else:
weights = weights.to(device=device, non_blocking=non_blocking)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.to(device, non_blocking=non_blocking),
weights=weights,
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=length_per_key,
offset_per_key=offset_per_key,
index_per_key=index_per_key,
jt_dict=jt_dict,
inverse_indices=inverse_indices,
)
def __str__(self) -> str:
if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
return "KeyedJaggedTensor()\n"
offsets = self.offsets()
return (
"KeyedJaggedTensor({\n"
+ ",\n".join(
[
" "
+ _jagged_tensor_string(
self._keys[index],
self._values,
self._weights,
offsets,
sum(self.stride_per_key()[:index]),
sum(self.stride_per_key()[: index + 1]),
)
for index in range(len(self._keys))
]
)
+ "\n})\n"
)
def pin_memory(self) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.pin_memory(),
weights=weights.pin_memory() if weights is not None else None,
lengths=lengths.pin_memory() if lengths is not None else None,
offsets=offsets.pin_memory() if offsets is not None else None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=None,
inverse_indices=inverse_indices,
)
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
splits = [batch_size_per_split, length_per_split]
if self.variable_stride_per_key():
splits.append(key_splits)
if self.weights_or_none() is not None:
splits.append(length_per_split)
return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
def dist_init(
keys: List[str],
tensors: List[torch.Tensor],
variable_stride_per_key: bool,
num_workers: int,
recat: Optional[torch.Tensor],
stride_per_rank: Optional[List[int]],
stagger: int = 1,
) -> "KeyedJaggedTensor":
assert len(tensors) in [2, 3, 4]
lengths = tensors[0]
values = tensors[1]
stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
weights = (
tensors[-1]
if (variable_stride_per_key and len(tensors) == 4)
or (not variable_stride_per_key and len(tensors) == 3)
else None
)
if variable_stride_per_key:
assert stride_per_rank_per_key is not None
stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
num_workers, len(keys)
).T.tolist()
strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
stride_per_rank_per_key
).tolist()
cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
length_per_key = (
cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
)
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
lengths, _ = _permute_tensor_by_segments(
lengths,
stride_per_rank_per_key,
recat,
None,
)
values, weights = _permute_tensor_by_segments(
values,
length_per_key,
recat,
weights,
)
if not stride_per_key_per_rank:
stride_per_key_per_rank = [[0]] * len(keys)
if stagger > 1:
stride_per_key_per_rank_stagger: List[List[int]] = []
local_world_size = num_workers // stagger
for i in range(len(keys)):
stride_per_rank_stagger: List[int] = []
for j in range(local_world_size):
stride_per_rank_stagger.extend(
stride_per_key_per_rank[i][j::local_world_size]
)
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
stride_per_key_per_rank = stride_per_key_per_rank_stagger
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride_per_key_per_rank=stride_per_key_per_rank,
)
return kjt.sync()
else:
assert stride_per_rank is not None
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
stride = stride_per_rank[0]
if all(s == stride for s in stride_per_rank):
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
recat,
lengths.view(-1, stride),
values,
weights,
values.numel(),
)
lengths = lengths.view(-1)
else: # variable batch size per rank
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_1D_sparse_data(
recat,
lengths.view(-1),
values,
weights,
values.numel(),
)
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=sum(stride_per_rank),
)
return kjt.sync()
def flatten_feature_lengths(features: KeyedJaggedTensor) -> KeyedJaggedTensor:
return features.flatten_lengths() if features.lengths().dim() > 1 else features | null |
8,996 | import copy
from typing import Any, Dict, List, Optional, Type
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.embedding_lookup import EmbeddingComputeKernel
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
)
from torchrec.distributed.embeddingbag import (
construct_output_kt,
create_sharding_infos_by_sharding,
)
from torchrec.distributed.fused_params import (
FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
FUSED_PARAM_REGISTER_TBE_BOOL,
get_tbes_to_register_from_iterable,
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
)
from torchrec.distributed.quant_state import ShardedQuantEmbeddingModuleState
from torchrec.distributed.sharding.cw_sharding import InferCwPooledEmbeddingSharding
from torchrec.distributed.sharding.rw_sharding import InferRwPooledEmbeddingSharding
from torchrec.distributed.sharding.tw_sharding import InferTwEmbeddingSharding
from torchrec.distributed.types import (
NullShardedModuleContext,
NullShardingContext,
ParameterSharding,
ShardingEnv,
ShardingType,
)
from torchrec.distributed.utils import copy_to_device
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
dtype_to_data_type,
EmbeddingBagConfig,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
FeatureProcessedEmbeddingBagCollection as QuantFeatureProcessedEmbeddingBagCollection,
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
torch.fx.wrap("len")
class EmbeddingSharding(abc.ABC, Generic[C, F, T, W], FeatureShardingMixIn):
"""
Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
table_wise.
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
self._qcomm_codecs_registry = qcomm_codecs_registry
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def create_input_dist(
self,
device: Optional[torch.device] = None,
) -> BaseSparseFeaturesDist[F]:
pass
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[C, T, W]:
pass
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup[F, T]:
pass
def embedding_dims(self) -> List[int]:
pass
def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
pass
def embedding_names(self) -> List[str]:
pass
def embedding_names_per_rank(self) -> List[List[str]]:
pass
def embedding_tables(self) -> List[ShardedEmbeddingTable]:
raise NotImplementedError
def uncombined_embedding_dims(self) -> List[int]:
return self.embedding_dims()
def uncombined_embedding_names(self) -> List[str]:
return self.embedding_names()
class EmbeddingShardingInfo:
embedding_config: EmbeddingTableConfig
param_sharding: ParameterSharding
param: torch.Tensor
fused_params: Optional[Dict[str, Any]] = None
class KJTList(Multistreamable):
def __init__(self, features: List[KeyedJaggedTensor]) -> None:
self.features = features
def __len__(self) -> int:
return len(self.features)
def __setitem__(self, key: int, item: KeyedJaggedTensor) -> None:
self.features[key] = item
def __getitem__(self, key: int) -> KeyedJaggedTensor:
return self.features[key]
def __iter__(self) -> Iterator[KeyedJaggedTensor]:
return iter(self.features)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
for feature in self.features:
feature.record_stream(stream)
def __fx_create_arg__(self, tracer: torch.fx.Tracer) -> fx.node.Argument:
return tracer.create_node(
"call_function",
KJTList,
args=(tracer.create_arg(self.features),),
kwargs={},
)
class InferCwPooledEmbeddingSharding(
BaseCwEmbeddingSharding[
NullShardingContext, KJTList, List[torch.Tensor], torch.Tensor
]
):
def create_input_dist(
self, device: Optional[torch.device] = None
) -> BaseSparseFeaturesDist[KJTList]:
return InferTwSparseFeaturesDist(
self.features_per_rank(),
self._world_size,
device if device is not None else self._device,
)
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
return InferGroupedPooledEmbeddingsLookup(
grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
world_size=self._world_size,
fused_params=fused_params,
device=device if device is not None else self._device,
)
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[NullShardingContext, List[torch.Tensor], torch.Tensor]:
device = device if device is not None else self._device
assert device is not None
dist_out = InferCwPooledEmbeddingDist(
device,
self._world_size,
)
if self._permute_embeddings and self._embedding_order != list(
range(len(self._embedding_order))
):
return InferCwPooledEmbeddingDistWithPermute(
device, self._world_size, self._embedding_dims, self._embedding_order
)
return dist_out
class InferRwPooledEmbeddingSharding(
BaseRwEmbeddingSharding[
NullShardingContext, KJTList, List[torch.Tensor], torch.Tensor
]
):
def create_input_dist(
self,
device: Optional[torch.device] = None,
) -> BaseSparseFeaturesDist[KJTList]:
num_features = self._get_num_features()
feature_hash_sizes = self._get_feature_hash_sizes()
(embed_sharding, is_even_sharding) = get_embedding_shard_metadata(
self._grouped_embedding_configs_per_rank
)
return InferRwSparseFeaturesDist(
world_size=self._world_size,
num_features=num_features,
feature_hash_sizes=feature_hash_sizes,
device=device if device is not None else self._device,
embedding_shard_metadata=embed_sharding if not is_even_sharding else None,
)
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
return InferGroupedPooledEmbeddingsLookup(
grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
world_size=self._world_size,
fused_params=fused_params,
device=device if device is not None else self._device,
)
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[NullShardingContext, List[torch.Tensor], torch.Tensor]:
assert device is not None
return InferRwPooledEmbeddingDist(
device=device,
world_size=self._world_size,
)
class InferTwEmbeddingSharding(
BaseTwEmbeddingSharding[
NullShardingContext, KJTList, List[torch.Tensor], torch.Tensor
]
):
"""
Shards embedding bags table-wise for inference
"""
def create_input_dist(
self,
device: Optional[torch.device] = None,
) -> BaseSparseFeaturesDist[KJTList]:
return InferTwSparseFeaturesDist(
features_per_rank=self.features_per_rank(),
world_size=self._world_size,
device=device,
)
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup[KJTList, List[torch.Tensor]]:
return InferGroupedPooledEmbeddingsLookup(
grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
world_size=self._world_size,
fused_params=fused_params,
device=device,
)
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[NullShardingContext, List[torch.Tensor], torch.Tensor]:
device = device if device is not None else self._device
assert device is not None
return InferTwPooledEmbeddingDist(
device,
self._world_size,
)
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ShardingEnv:
"""
Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
enables `DistributedModelParallel` to be used during inference.
"""
def __init__(
self,
world_size: int,
rank: int,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
self.world_size = world_size
self.rank = rank
self.process_group: Optional[dist.ProcessGroup] = pg
def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
"""
Creates ProcessGroup-based sharding environment.
NOTE:
Typically used during training.
"""
return cls(dist.get_world_size(pg), dist.get_rank(pg), pg)
def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
"""
Creates a local host-based sharding environment.
NOTE:
Typically used during single host inference.
"""
return cls(world_size, rank, None)
class NullShardingContext(Multistreamable):
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
pass
def create_infer_embedding_bag_sharding(
sharding_type: str,
sharding_infos: List[EmbeddingShardingInfo],
env: ShardingEnv,
) -> EmbeddingSharding[NullShardingContext, KJTList, List[torch.Tensor], torch.Tensor]:
if sharding_type == ShardingType.TABLE_WISE.value:
return InferTwEmbeddingSharding(sharding_infos, env, device=None)
elif sharding_type == ShardingType.ROW_WISE.value:
return InferRwPooledEmbeddingSharding(sharding_infos, env, device=None)
elif sharding_type == ShardingType.COLUMN_WISE.value:
return InferCwPooledEmbeddingSharding(
sharding_infos, env, device=None, permute_embeddings=True
)
else:
raise ValueError(f"Sharding type not supported {sharding_type}") | null |
8,997 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
GRADIENT_DIVISION: bool = True
def set_gradient_division(val: bool) -> None:
global GRADIENT_DIVISION
GRADIENT_DIVISION = val | null |
8,998 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
GRADIENT_DIVISION: bool = True
def get_gradient_division() -> bool:
global GRADIENT_DIVISION
return GRADIENT_DIVISION | null |
8,999 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.qcomm_ctx = None # type: ignore
self.rsi = None # type: ignore
self.agi = None # type: ignore
self.wait_function = None # type: ignore
# This dummy tensor is used to build the autograd graph between
# CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
# are stored in self.tensor
self.dummy_tensor: torch.Tensor = torch.empty(
1,
requires_grad=True,
device=device,
)
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
self.req = None
self.tensor = None
return ret
class All2AllPooledInfo(object):
"""
The data class that collects the attributes when calling the `alltoall_pooled`
operation.
Attributes:
batch_size_per_rank (List[int]): batch size in each rank
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
"""
batch_size_per_rank: List[int]
dim_sum_per_rank: List[int]
dim_sum_per_rank_tensor: Optional[Tensor]
cumsum_dim_sum_per_rank_tensor: Optional[Tensor]
codecs: Optional[QuantizedCommCodecs] = None
def all2all_pooled_sync(
pg: dist.ProcessGroup, a2ai: All2AllPooledInfo, input_embeddings: Tensor
) -> Tensor:
my_rank = pg.rank()
(B_global, D_local_sum) = input_embeddings.shape
dim_sum_per_rank = a2ai.dim_sum_per_rank
batch_size_per_rank = a2ai.batch_size_per_rank
B_local = batch_size_per_rank[my_rank]
assert B_global == sum(batch_size_per_rank)
sharded_input_embeddings = input_embeddings.view(-1)
if a2ai.codecs is not None:
codecs = none_throws(a2ai.codecs)
qcomm_ctx = codecs.forward.create_context()
sharded_input_embeddings = codecs.forward.encode(
sharded_input_embeddings,
qcomm_ctx,
)
output_split_sizes = [
codecs.forward.calc_quantized_size(
B_local * D_rank_sum,
qcomm_ctx,
)
for D_rank_sum in dim_sum_per_rank
]
input_split_sizes = [
codecs.forward.calc_quantized_size(
D_local_sum * B_rank,
qcomm_ctx,
)
for B_rank in batch_size_per_rank
]
else:
output_split_sizes = [B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank]
input_split_sizes = [D_local_sum * B_rank for B_rank in batch_size_per_rank]
qcomm_ctx = None
with record_function("## alltoall_fwd_single ##"):
sharded_output_embeddings = dist._functional_collectives.all_to_all_single(
sharded_input_embeddings,
output_split_sizes,
input_split_sizes,
pg,
)
if a2ai.codecs is not None:
codecs = none_throws(a2ai.codecs)
sharded_output_embeddings = codecs.forward.decode(
sharded_output_embeddings,
qcomm_ctx,
)
outputs_by_rank = sharded_output_embeddings.split(output_split_sizes)
return torch.cat([output.view(B_local, -1) for output in outputs_by_rank], dim=1)
class All2All_Pooled_Req(Function):
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllPooledInfo,
input_embeddings: Tensor,
) -> Tensor:
my_rank = dist.get_rank(pg)
(B_global, D_local_sum) = input_embeddings.shape
dim_sum_per_rank = a2ai.dim_sum_per_rank
batch_size_per_rank = a2ai.batch_size_per_rank
B_local = batch_size_per_rank[my_rank]
assert B_global == sum(batch_size_per_rank)
sharded_input_embeddings = input_embeddings.view(-1)
if a2ai.codecs is not None:
codecs = none_throws(a2ai.codecs)
qcomm_ctx = codecs.forward.create_context()
sharded_input_embeddings = codecs.forward.encode(
sharded_input_embeddings,
qcomm_ctx,
)
output_split_sizes = [
codecs.forward.calc_quantized_size(
B_local * D_rank_sum,
qcomm_ctx,
)
for D_rank_sum in dim_sum_per_rank
]
input_split_sizes = [
codecs.forward.calc_quantized_size(
D_local_sum * B_rank,
qcomm_ctx,
)
for B_rank in batch_size_per_rank
]
else:
output_split_sizes = [
B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank
]
input_split_sizes = [D_local_sum * B_rank for B_rank in batch_size_per_rank]
qcomm_ctx = None
sharded_output_embeddings = torch.empty(
sum(output_split_sizes),
dtype=sharded_input_embeddings.dtype,
device=sharded_input_embeddings.device,
)
with record_function("## alltoall_fwd_single ##"):
req = dist.all_to_all_single(
output=sharded_output_embeddings,
input=sharded_input_embeddings,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = sharded_output_embeddings
myreq.qcomm_ctx = qcomm_ctx
myreq.a2ai = a2ai
myreq.wait_function = All2All_Pooled_Wait
ctx.myreq = myreq
ctx.pg = pg
return myreq.dummy_tensor
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
pg = ctx.pg
my_rank = dist.get_rank(pg)
myreq = ctx.myreq
a2ai = myreq.a2ai
assert myreq.req is not None
myreq.req.wait()
myreq.req = None
grad_output = myreq.tensor
dim_sum_per_rank = a2ai.dim_sum_per_rank
batch_size_per_rank = a2ai.batch_size_per_rank
D_local_sum = dim_sum_per_rank[my_rank]
B_global = sum(batch_size_per_rank)
if a2ai.codecs is not None:
codecs = none_throws(a2ai.codecs)
grad_input = codecs.backward.decode(grad_output, myreq.qcomm_ctx)
grad_input = grad_input.view(B_global, D_local_sum)
else:
grad_input = grad_output.view(B_global, D_local_sum)
if GRADIENT_DIVISION:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor = None
return (None, None, None, grad_input)
class QuantizedCommCodecs:
"""
The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
"""
# pyre-ignore
forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
# pyre-ignore
backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
def __init__(self) -> None:
self._callbacks: List[Callable[[W], W]] = []
def _wait_impl(self) -> W:
pass
def wait(self) -> W:
with record_function(f"## {self.__class__.__name__} wait() ##"):
ret: W = self._wait_impl()
for callback in self.callbacks:
ret = callback(ret)
return ret
def callbacks(self) -> List[Callable[[W], W]]:
return self._callbacks
class NoWait(Awaitable[W]):
def __init__(self, obj: W) -> None:
super().__init__()
self._obj = obj
def _wait_impl(self) -> W:
return self._obj
The provided code snippet includes necessary dependencies for implementing the `alltoall_pooled` function. Write a Python function `def alltoall_pooled( a2a_pooled_embs_tensor: Tensor, batch_size_per_rank: List[int], dim_sum_per_rank: List[int], dim_sum_per_rank_tensor: Optional[Tensor] = None, cumsum_dim_sum_per_rank_tensor: Optional[Tensor] = None, group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Performs AlltoAll operation for a single pooled embedding tensor. Each process splits the input pooled embeddings tensor based on the world size, and then scatters the split list to all processes in the group. Then concatenates the received tensors from all processes in the group and returns a single output tensor. Args: a2a_pooled_embs_tensor (Tensor): input pooled embeddings. Must be pooled together before passing into this function. Its shape is `B x D_local_sum`, where `D_local_sum` is the dimension sum of all the local embedding tables. batch_size_per_rank (List[int]): batch size in each rank. dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the embedding in each rank. dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of `dim_sum_per_rank`, this is only used by the fast kernel of `_recat_pooled_embedding_grad_out`. cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of `dim_sum_per_rank`, this is only used by the fast kernel of `_recat_pooled_embedding_grad_out`. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting tensor. .. warning:: `alltoall_pooled` is experimental and subject to change.
Here is the function:
def alltoall_pooled(
a2a_pooled_embs_tensor: Tensor,
batch_size_per_rank: List[int],
dim_sum_per_rank: List[int],
dim_sum_per_rank_tensor: Optional[Tensor] = None,
cumsum_dim_sum_per_rank_tensor: Optional[Tensor] = None,
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
"""
Performs AlltoAll operation for a single pooled embedding tensor. Each process
splits the input pooled embeddings tensor based on the world size, and then scatters
the split list to all processes in the group. Then concatenates the received tensors
from all processes in the group and returns a single output tensor.
Args:
a2a_pooled_embs_tensor (Tensor): input pooled embeddings. Must be pooled
together before passing into this function. Its shape is `B x D_local_sum`,
where `D_local_sum` is the dimension sum of all the local embedding tables.
batch_size_per_rank (List[int]): batch size in each rank.
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
Returns:
Awaitable[Tensor]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting tensor.
.. warning::
`alltoall_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if group.size() <= 1:
return NoWait(a2a_pooled_embs_tensor)
a2ai = All2AllPooledInfo(
batch_size_per_rank=batch_size_per_rank,
dim_sum_per_rank=dim_sum_per_rank,
dim_sum_per_rank_tensor=dim_sum_per_rank_tensor,
cumsum_dim_sum_per_rank_tensor=cumsum_dim_sum_per_rank_tensor,
codecs=codecs,
)
if is_torchdynamo_compiling():
return NoWait(all2all_pooled_sync(group, a2ai, a2a_pooled_embs_tensor))
myreq = Request(group, device=a2a_pooled_embs_tensor.device)
All2All_Pooled_Req.apply(group, myreq, a2ai, a2a_pooled_embs_tensor)
return myreq | Performs AlltoAll operation for a single pooled embedding tensor. Each process splits the input pooled embeddings tensor based on the world size, and then scatters the split list to all processes in the group. Then concatenates the received tensors from all processes in the group and returns a single output tensor. Args: a2a_pooled_embs_tensor (Tensor): input pooled embeddings. Must be pooled together before passing into this function. Its shape is `B x D_local_sum`, where `D_local_sum` is the dimension sum of all the local embedding tables. batch_size_per_rank (List[int]): batch size in each rank. dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the embedding in each rank. dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of `dim_sum_per_rank`, this is only used by the fast kernel of `_recat_pooled_embedding_grad_out`. cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of `dim_sum_per_rank`, this is only used by the fast kernel of `_recat_pooled_embedding_grad_out`. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting tensor. .. warning:: `alltoall_pooled` is experimental and subject to change. |
9,000 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
    """
    Defines a collective operation request for a process group on a tensor.

    A CommOp-Req autograd function stores the in-flight ``dist.Work`` handle,
    the output buffer, and per-op metadata on this object; calling ``wait()``
    runs the matching CommOp-Await autograd function to finish the collective.

    Args:
        pg (dist.ProcessGroup): The process group the request is for.
        device (torch.device): device on which the autograd-graph dummy tensor
            is allocated.
    """

    def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
        super().__init__()
        self.pg: dist.ProcessGroup = pg
        # Async work handle of the in-flight collective; cleared after wait().
        self.req: Optional[dist.Work] = None
        # Output (forward) or gradient (backward) buffer of the collective.
        self.tensor: Optional[W] = None
        # Per-op metadata slots, populated by the specific CommOp-Req function.
        self.a2ai = None  # type: ignore
        self.qcomm_ctx = None  # type: ignore
        self.rsi = None  # type: ignore
        self.agi = None  # type: ignore
        # Autograd function class used to await this request (set by the Req op).
        self.wait_function = None  # type: ignore
        # This dummy tensor is used to build the autograd graph between
        # CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
        # are stored in self.tensor
        self.dummy_tensor: torch.Tensor = torch.empty(
            1,
            requires_grad=True,
            device=device,
        )

    def _wait_impl(self) -> W:
        """
        Calls the wait function for this request.

        Returns:
            W: the awaited result produced by ``wait_function``.
        """
        ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
        # Drop references so the work handle and buffers can be freed promptly.
        self.req = None
        self.tensor = None
        return ret
# @dataclass restored: instances are built with keyword args in
# `variable_batch_alltoall_pooled`, which requires the generated __init__.
@dataclass
class VariableBatchAll2AllPooledInfo(object):
    """
    The data class that collects the attributes when calling the
    `variable_batch_alltoall_pooled` operation.

    Attributes:
        batch_size_per_rank_per_feature (List[List[int]]): batch size per rank per
            feature.
        batch_size_per_feature_pre_a2a (List[int]): local batch size before scattering.
        emb_dim_per_rank_per_feature (List[List[int]]): embedding dimension per rank
            per feature.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
        input_splits (Optional[List[int]]): input splits of tensor all to all
            (filled in by the AlltoAll implementation).
        output_splits (Optional[List[int]]): output splits of tensor all to all
            (filled in by the AlltoAll implementation).
    """

    batch_size_per_rank_per_feature: List[List[int]]
    batch_size_per_feature_pre_a2a: List[int]
    emb_dim_per_rank_per_feature: List[List[int]]
    codecs: Optional[QuantizedCommCodecs] = None
    input_splits: Optional[List[int]] = None
    output_splits: Optional[List[int]] = None
def variable_batch_all2all_pooled_sync(
    pg: dist.ProcessGroup,
    a2ai: VariableBatchAll2AllPooledInfo,
    input_embeddings: Tensor,
) -> Tensor:
    """
    Synchronous (torch.compile-friendly) variable-batch pooled-embedding
    AlltoAll.

    Derives per-rank input/output element counts from the metadata in
    ``a2ai`` (storing them back on ``a2ai``), optionally quantizes the
    flattened embeddings, runs a functional ``all_to_all_single`` on ``pg``,
    and decodes the result.

    Args:
        pg (dist.ProcessGroup): process group to exchange over.
        a2ai (VariableBatchAll2AllPooledInfo): split metadata; its
            ``input_splits``/``output_splits`` fields are populated here.
        input_embeddings (Tensor): local pooled embeddings to scatter.

    Returns:
        Tensor: flattened embeddings received from all ranks.
    """
    rank = pg.rank()
    world_size = dist.get_world_size(pg)

    # Elements sent to each destination: destination's batch sizes combined
    # with this rank's per-feature embedding dims.
    if a2ai.batch_size_per_rank_per_feature:
        local_dims = a2ai.emb_dim_per_rank_per_feature[rank]
        input_split_sizes = [
            sum(
                bs * dim
                for bs, dim in zip(
                    a2ai.batch_size_per_rank_per_feature[dst], local_dims
                )
            )
            for dst in range(world_size)
        ]
    else:
        input_split_sizes = [0] * world_size
    a2ai.input_splits = input_split_sizes

    # Elements received from each source: walk the flat pre-a2a batch sizes
    # in step with every source rank's embedding dims.
    pre_a2a_batch_sizes = iter(a2ai.batch_size_per_feature_pre_a2a)
    output_split_sizes = [
        sum(
            next(pre_a2a_batch_sizes) * dim
            for dim in a2ai.emb_dim_per_rank_per_feature[src]
        )
        for src in range(world_size)
    ]
    a2ai.output_splits = output_split_sizes

    sharded_input_embeddings = input_embeddings.view(-1)
    qcomm_ctx = None
    if a2ai.codecs is not None:
        codecs = none_throws(a2ai.codecs)
        qcomm_ctx = codecs.forward.create_context()
        sharded_input_embeddings = codecs.forward.encode(
            sharded_input_embeddings,
            qcomm_ctx,
        )
        # With a codec active, split sizes are in quantized elements.
        output_split_sizes = [
            codecs.forward.calc_quantized_size(split, qcomm_ctx)
            for split in output_split_sizes
        ]
        input_split_sizes = [
            codecs.forward.calc_quantized_size(split, qcomm_ctx)
            for split in input_split_sizes
        ]

    with record_function("## alltoall_fwd_single ##"):
        sharded_output_embeddings = dist._functional_collectives.all_to_all_single(
            sharded_input_embeddings,
            output_split_sizes=output_split_sizes,
            input_split_sizes=input_split_sizes,
            group=pg,
        )

    if a2ai.codecs is not None:
        codecs = none_throws(a2ai.codecs)
        sharded_output_embeddings = codecs.forward.decode(
            sharded_output_embeddings,
            qcomm_ctx,
        )
    return sharded_output_embeddings
class Variable_Batch_All2All_Pooled_Req(Function):
    """
    Autograd function that launches the asynchronous variable-batch pooled
    embedding AlltoAll.

    ``forward`` computes per-rank split sizes from ``a2ai``, optionally
    quantizes the flattened embeddings, issues an async ``all_to_all_single``,
    and parks the work handle and output buffer on ``myreq``; the paired
    ``Variable_Batch_All2All_Pooled_Wait`` function completes the exchange.
    ``backward`` waits on the gradient AlltoAll issued by the Wait op and
    returns the decoded gradient.
    """

    @staticmethod
    # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
    def forward(
        # pyre-fixme[2]: Parameter must be annotated.
        ctx,
        pg: dist.ProcessGroup,
        myreq: Request[Tensor],
        a2ai: VariableBatchAll2AllPooledInfo,
        input_embeddings: Tensor,
    ) -> Tensor:
        my_rank = dist.get_rank(pg)
        # get input splits: elements sent to rank i combine rank i's batch
        # sizes with this rank's per-feature embedding dims.
        world_size = dist.get_world_size(pg)
        input_split_sizes = [0 for _ in range(world_size)]
        if a2ai.batch_size_per_rank_per_feature:
            for i in range(world_size):
                curr_size = 0
                for batch_size, emb_dim in zip(
                    a2ai.batch_size_per_rank_per_feature[i],
                    a2ai.emb_dim_per_rank_per_feature[my_rank],
                ):
                    curr_size += batch_size * emb_dim
                input_split_sizes[i] = curr_size
        a2ai.input_splits = input_split_sizes
        # get output splits: walk the flat pre-a2a batch sizes in step with
        # each source rank's embedding dims.
        output_split_sizes = [0 for _ in range(world_size)]
        ind = 0
        for i in range(world_size):
            curr_size = 0
            for emb_dim in a2ai.emb_dim_per_rank_per_feature[i]:
                curr_size += a2ai.batch_size_per_feature_pre_a2a[ind] * emb_dim
                ind += 1
            output_split_sizes[i] = curr_size
        a2ai.output_splits = output_split_sizes
        sharded_input_embeddings = input_embeddings.view(-1)
        qcomm_ctx = None
        if a2ai.codecs is not None:
            codecs = none_throws(a2ai.codecs)
            qcomm_ctx = codecs.forward.create_context()
            sharded_input_embeddings = codecs.forward.encode(
                sharded_input_embeddings,
                qcomm_ctx,
            )
            # With a codec active, split sizes are in quantized elements.
            output_split_sizes = [
                codecs.forward.calc_quantized_size(
                    split,
                    qcomm_ctx,
                )
                for split in output_split_sizes
            ]
            input_split_sizes = [
                codecs.forward.calc_quantized_size(
                    split,
                    qcomm_ctx,
                )
                for split in input_split_sizes
            ]
        sharded_output_embeddings = torch.empty(
            sum(output_split_sizes),
            dtype=sharded_input_embeddings.dtype,
            device=sharded_input_embeddings.device,
        )
        with record_function("## alltoall_fwd_single ##"):
            req = dist.all_to_all_single(
                output=sharded_output_embeddings,
                input=sharded_input_embeddings,
                output_split_sizes=output_split_sizes,
                input_split_sizes=input_split_sizes,
                group=pg,
                async_op=True,
            )
        # Park the async state on the request; the Wait op picks it up.
        myreq.req = req
        myreq.tensor = sharded_output_embeddings
        myreq.qcomm_ctx = qcomm_ctx
        myreq.a2ai = a2ai
        myreq.wait_function = Variable_Batch_All2All_Pooled_Wait
        ctx.myreq = myreq
        ctx.pg = pg
        # Only the dummy tensor flows through autograd; real data rides on myreq.
        return myreq.dummy_tensor

    @staticmethod
    # pyre-fixme[2]: Parameter must be annotated.
    def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
        myreq = ctx.myreq
        a2ai = myreq.a2ai
        assert myreq.req is not None
        myreq.req.wait()
        # NOTE(review): this guarded second wait looks redundant with the
        # unconditional wait above; waiting on an already-completed dist.Work
        # returns immediately, so behavior should be unchanged. Confirm
        # against upstream before simplifying.
        if isinstance(myreq.req, dist.Work):
            myreq.req.wait()
        myreq.req = None
        grad_output = myreq.tensor
        if a2ai.codecs is not None:
            codecs = none_throws(a2ai.codecs)
            grad_input = codecs.backward.decode(grad_output, myreq.qcomm_ctx)
        else:
            grad_input = grad_output
        # GRADIENT_DIVISION is a module-level flag defined elsewhere in this
        # file; when set, gradients are averaged across ranks.
        if GRADIENT_DIVISION:
            grad_input.div_(dist.get_world_size(ctx.pg))
        myreq.tensor = None
        myreq.dummy_tensor = None
        return (None, None, None, grad_input)
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): upstream declares this class as a @dataclass; the decorator
    # may have been stripped here -- confirm before relying on a generated
    # __init__.
    # Codec applied to tensors exchanged in the forward pass.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
    # Codec applied to gradient tensors in the backward pass.
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
    """
    Abstract handle for a deferred result of type ``W``.

    Subclasses implement :meth:`_wait_impl`; callers use :meth:`wait`, which
    additionally passes the awaited value through every registered callback.
    """

    def __init__(self) -> None:
        # Callbacks applied, in registration order, to the awaited value.
        self._callbacks: List[Callable[[W], W]] = []

    @abc.abstractmethod
    def _wait_impl(self) -> W:
        """Produce the underlying result; implemented by subclasses."""
        pass

    def wait(self) -> W:
        """
        Block until the result is ready and return it, after applying every
        registered callback.
        """
        with record_function(f"## {self.__class__.__name__} wait() ##"):
            ret: W = self._wait_impl()
            # ``callbacks`` must be a property: iterating a bound method
            # object would raise TypeError here.
            for callback in self.callbacks:
                ret = callback(ret)
            return ret

    @property
    def callbacks(self) -> List[Callable[[W], W]]:
        return self._callbacks
class NoWait(Awaitable[W]):
    """
    Trivial ``Awaitable`` around a value that is already available;
    ``wait()`` hands it back immediately.
    """

    def __init__(self, obj: W) -> None:
        super().__init__()
        self._result = obj

    def _wait_impl(self) -> W:
        # Nothing is in flight; return the stored value as-is.
        return self._result
def variable_batch_alltoall_pooled(
    a2a_pooled_embs_tensor: Tensor,
    batch_size_per_rank_per_feature: List[List[int]],
    batch_size_per_feature_pre_a2a: List[int],
    emb_dim_per_rank_per_feature: List[List[int]],
    group: Optional[dist.ProcessGroup] = None,
    codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
    """
    Performs an AlltoAll operation for a pooled embeddings tensor whose batch
    size may vary per rank and per feature.

    The split metadata is packed into a `VariableBatchAll2AllPooledInfo`; the
    actual split computation and collective are performed by
    `Variable_Batch_All2All_Pooled_Req` (async path) or by
    `variable_batch_all2all_pooled_sync` when running under torchdynamo
    compilation.

    Args:
        a2a_pooled_embs_tensor (Tensor): input pooled embeddings.
        batch_size_per_rank_per_feature (List[List[int]]): batch size per rank
            per feature.
        batch_size_per_feature_pre_a2a (List[int]): local batch size per
            feature before scattering.
        emb_dim_per_rank_per_feature (List[List[int]]): embedding dimension
            per rank per feature.
        group (Optional[dist.ProcessGroup]): the process group to work on. If
            None, the default process group will be used.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.

    Returns:
        Awaitable[Tensor]: async work handle (`Awaitable`), which can be
        `wait()` later to get the resulting tensor.
    """
    if group is None:
        group = dist.distributed_c10d._get_default_group()
    # Single-rank groups have nothing to exchange.
    if dist.get_world_size(group) <= 1:
        return NoWait(a2a_pooled_embs_tensor)
    a2ai = VariableBatchAll2AllPooledInfo(
        batch_size_per_rank_per_feature=batch_size_per_rank_per_feature,
        batch_size_per_feature_pre_a2a=batch_size_per_feature_pre_a2a,
        emb_dim_per_rank_per_feature=emb_dim_per_rank_per_feature,
        codecs=codecs,
    )
    if is_torchdynamo_compiling():
        return NoWait(
            variable_batch_all2all_pooled_sync(group, a2ai, a2a_pooled_embs_tensor)
        )
    myreq = Request(group, device=a2a_pooled_embs_tensor.device)
    Variable_Batch_All2All_Pooled_Req.apply(group, myreq, a2ai, a2a_pooled_embs_tensor)
    return myreq
from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
# Prefer the real torchdynamo probe when available; otherwise fall back to a
# stub that always reports "not compiling".
try:
    from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:

    def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
        """Fallback used when ``torch._dynamo`` cannot be imported."""
        return False
class Request(Awaitable[W]):
    """
    Defines a collective operation request for a process group on a tensor.

    A CommOp-Req autograd function stores the in-flight ``dist.Work`` handle,
    the output buffer, and per-op metadata on this object; calling ``wait()``
    runs the matching CommOp-Await autograd function to finish the collective.

    Args:
        pg (dist.ProcessGroup): The process group the request is for.
        device (torch.device): device on which the autograd-graph dummy tensor
            is allocated.
    """

    def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
        super().__init__()
        self.pg: dist.ProcessGroup = pg
        # Async work handle of the in-flight collective; cleared after wait().
        self.req: Optional[dist.Work] = None
        # Output (forward) or gradient (backward) buffer of the collective.
        self.tensor: Optional[W] = None
        # Per-op metadata slots, populated by the specific CommOp-Req function.
        self.a2ai = None  # type: ignore
        self.qcomm_ctx = None  # type: ignore
        self.rsi = None  # type: ignore
        self.agi = None  # type: ignore
        # Autograd function class used to await this request (set by the Req op).
        self.wait_function = None  # type: ignore
        # This dummy tensor is used to build the autograd graph between
        # CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
        # are stored in self.tensor
        self.dummy_tensor: torch.Tensor = torch.empty(
            1,
            requires_grad=True,
            device=device,
        )

    def _wait_impl(self) -> W:
        """
        Calls the wait function for this request.

        Returns:
            W: the awaited result produced by ``wait_function``.
        """
        ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
        # Drop references so the work handle and buffers can be freed promptly.
        self.req = None
        self.tensor = None
        return ret
# @dataclass restored: instances are built with keyword args in
# `alltoall_sequence`, which requires the generated __init__.
@dataclass
class All2AllSequenceInfo(object):
    """
    The data class that collects the attributes when calling the `alltoall_sequence`
    operation.

    Attributes:
        embedding_dim (int): embedding dimension.
        lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
            AlltoAll.
        forward_recat_tensor (Optional[Tensor]): recat tensor for forward.
        backward_recat_tensor (Tensor): recat tensor for backward.
        input_splits (List[int]): input splits.
        output_splits (List[int]): output splits.
        variable_batch_size (bool): whether variable batch size is enabled.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
        permuted_lengths_after_sparse_data_all2all (Optional[Tensor]): lengths of sparse
            features before AlltoAll.
    """

    embedding_dim: int
    lengths_after_sparse_data_all2all: Tensor
    forward_recat_tensor: Optional[Tensor]
    backward_recat_tensor: Tensor
    input_splits: List[int]
    output_splits: List[int]
    variable_batch_size: bool = False
    codecs: Optional[QuantizedCommCodecs] = None
    permuted_lengths_after_sparse_data_all2all: Optional[Tensor] = None
def all2all_sequence_sync(
    pg: dist.ProcessGroup,
    a2ai: All2AllSequenceInfo,
    sharded_input_embeddings: Tensor,
) -> Tensor:
    """
    Synchronous (torch.compile-friendly) sequence-embedding AlltoAll.

    Scales lengths/splits by the embedding dim, permutes the local embeddings
    into send order via fbgemm, optionally quantizes them, runs a functional
    ``all_to_all_single`` over ``pg``, decodes, and returns the received
    embeddings reshaped to ``(-1, embedding_dim)``.

    Args:
        pg (dist.ProcessGroup): process group to exchange over.
        a2ai (All2AllSequenceInfo): split/recat metadata; its
            ``input_splits``/``output_splits`` are rewritten here (swapped and
            scaled by D).
        sharded_input_embeddings (Tensor): local embeddings to scatter.

    Returns:
        Tensor: received embeddings of shape ``(-1, embedding_dim)``.
    """
    world_size = pg.size()
    D = a2ai.embedding_dim
    forward_recat_tensor = a2ai.forward_recat_tensor
    variable_batch_size = a2ai.variable_batch_size
    # Lengths are expressed in elements (length * D) from here on.
    lengths_after_sparse_data_all2all = a2ai.lengths_after_sparse_data_all2all * D
    # Note the swap: this op's input splits come from a2ai.output_splits and
    # vice versa, each scaled by the embedding dim.
    input_splits = [i * D for i in a2ai.output_splits]
    output_splits = [i * D for i in a2ai.input_splits]
    a2ai.input_splits = input_splits
    a2ai.output_splits = output_splits
    local_T = lengths_after_sparse_data_all2all.shape[0]
    if local_T > 0:
        with record_function("## alltoall_seq_embedding_fwd_permute ##"):
            if not variable_batch_size:
                (
                    permuted_lengths_after_sparse_data_all2all,
                    sharded_input_embeddings,
                    _,
                ) = torch.ops.fbgemm.permute_2D_sparse_data(
                    forward_recat_tensor,
                    lengths_after_sparse_data_all2all.view(local_T * world_size, -1),
                    sharded_input_embeddings.view(-1),
                    None,
                    sharded_input_embeddings.numel(),
                )
            else:
                (
                    permuted_lengths_after_sparse_data_all2all,
                    sharded_input_embeddings,
                    _,
                ) = torch.ops.fbgemm.permute_1D_sparse_data(
                    forward_recat_tensor,
                    lengths_after_sparse_data_all2all.view(-1),
                    sharded_input_embeddings.view(-1),
                    None,
                    sharded_input_embeddings.numel(),
                )
    else:
        # Variable is not used in sync mode, left for conformity with async path
        permuted_lengths_after_sparse_data_all2all = None  # noqa: F841
    if a2ai.codecs is not None:
        codecs = none_throws(a2ai.codecs)
        qcomm_ctx = codecs.forward.create_context()
        # pyre-ignore [16]
        sharded_input_embeddings = a2ai.codecs.forward.encode(
            sharded_input_embeddings, qcomm_ctx
        )
        # With a codec active, split sizes are in quantized elements.
        output_splits = [
            a2ai.codecs.forward.calc_quantized_size(x, qcomm_ctx) for x in output_splits
        ]
        input_splits = [
            a2ai.codecs.forward.calc_quantized_size(x, qcomm_ctx) for x in input_splits
        ]
    else:
        qcomm_ctx = None
    with record_function("## alltoall_seq_embedding_fwd_single ##"):
        sharded_output_embeddings = dist._functional_collectives.all_to_all_single(
            sharded_input_embeddings,
            output_split_sizes=output_splits,
            input_split_sizes=input_splits,
            group=pg,
        )
    if a2ai.codecs is not None:
        codecs = none_throws(a2ai.codecs)
        sharded_output_embeddings = codecs.forward.decode(
            sharded_output_embeddings, qcomm_ctx
        )
    return sharded_output_embeddings.view(-1, D)
class All2All_Seq_Req(Function):
    """
    Autograd function that launches the asynchronous sequence-embedding
    AlltoAll.

    ``forward`` permutes the local embeddings into send order via fbgemm,
    optionally quantizes them, issues an async ``all_to_all_single``, and
    parks the work handle and output buffer on ``myreq``; the paired
    ``All2All_Seq_Req_Wait`` function completes the exchange. ``backward``
    waits on the gradient AlltoAll issued by the Wait op, permutes the
    gradient back into the original order, and returns it.
    """

    @staticmethod
    # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
    def forward(
        # pyre-fixme[2]: Parameter must be annotated.
        ctx,
        pg: dist.ProcessGroup,
        myreq: Request[Tensor],
        a2ai: All2AllSequenceInfo,
        sharded_input_embeddings: Tensor,
    ) -> Tensor:
        world_size = dist.get_world_size(pg)
        my_rank = dist.get_rank(pg)
        D = a2ai.embedding_dim
        forward_recat_tensor = a2ai.forward_recat_tensor
        variable_batch_size = a2ai.variable_batch_size
        # Lengths are expressed in elements (length * D) from here on.
        lengths_after_sparse_data_all2all = a2ai.lengths_after_sparse_data_all2all * D
        # Note the swap: this op's input splits come from a2ai.output_splits
        # and vice versa, each scaled by the embedding dim.
        input_splits = [i * D for i in a2ai.output_splits]
        output_splits = [i * D for i in a2ai.input_splits]
        a2ai.input_splits = input_splits
        a2ai.output_splits = output_splits
        local_T = lengths_after_sparse_data_all2all.shape[0]
        if local_T > 0:
            with record_function("## alltoall_seq_embedding_fwd_permute ##"):
                if not variable_batch_size:
                    (
                        permuted_lengths_after_sparse_data_all2all,
                        sharded_input_embeddings,
                        _,
                    ) = torch.ops.fbgemm.permute_2D_sparse_data(
                        forward_recat_tensor,
                        lengths_after_sparse_data_all2all.view(
                            local_T * world_size, -1
                        ),
                        sharded_input_embeddings.view(-1),
                        None,
                        sharded_input_embeddings.numel(),
                    )
                else:
                    (
                        permuted_lengths_after_sparse_data_all2all,
                        sharded_input_embeddings,
                        _,
                    ) = torch.ops.fbgemm.permute_1D_sparse_data(
                        forward_recat_tensor,
                        lengths_after_sparse_data_all2all.view(-1),
                        sharded_input_embeddings.view(-1),
                        None,
                        sharded_input_embeddings.numel(),
                    )
        else:
            permuted_lengths_after_sparse_data_all2all = None
        if a2ai.codecs is not None:
            codecs = none_throws(a2ai.codecs)
            qcomm_ctx = codecs.forward.create_context()
            # pyre-ignore [16]
            sharded_input_embeddings = a2ai.codecs.forward.encode(
                sharded_input_embeddings, qcomm_ctx
            )
            # With a codec active, split sizes are in quantized elements.
            output_splits = [
                a2ai.codecs.forward.calc_quantized_size(x, qcomm_ctx)
                for x in output_splits
            ]
            input_splits = [
                a2ai.codecs.forward.calc_quantized_size(x, qcomm_ctx)
                for x in input_splits
            ]
        else:
            qcomm_ctx = None
        sharded_output_embeddings = torch.empty(
            sum(output_splits),
            dtype=sharded_input_embeddings.dtype,
            device=sharded_input_embeddings.device,
        )
        with record_function("## alltoall_seq_embedding_fwd_single ##"):
            req = dist.all_to_all_single(
                output=sharded_output_embeddings,
                input=sharded_input_embeddings,
                output_split_sizes=output_splits,
                input_split_sizes=input_splits,
                group=pg,
                async_op=True,
            )
        # Saved so backward can undo the forward permutation.
        a2ai.permuted_lengths_after_sparse_data_all2all = (
            permuted_lengths_after_sparse_data_all2all
        )
        # Park the async state on the request; the Wait op picks it up.
        myreq.req = req
        myreq.tensor = sharded_output_embeddings
        myreq.a2ai = a2ai
        myreq.wait_function = All2All_Seq_Req_Wait
        ctx.myreq = myreq
        myreq.qcomm_ctx = qcomm_ctx
        ctx.pg = pg
        ctx.my_rank = my_rank
        ctx.world_size = world_size
        # Only the dummy tensor flows through autograd; real data rides on myreq.
        return myreq.dummy_tensor

    @staticmethod
    # pyre-fixme[2]: Parameter must be annotated.
    def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
        myreq = ctx.myreq
        a2ai = myreq.a2ai
        D = a2ai.embedding_dim
        variable_batch_size = a2ai.variable_batch_size
        backward_recat_tensor = a2ai.backward_recat_tensor
        permuted_lengths_after_sparse_data_all2all = (
            a2ai.permuted_lengths_after_sparse_data_all2all
        )
        assert myreq.req is not None
        myreq.req.wait()
        sharded_grad_input = myreq.tensor
        if a2ai.codecs is not None:
            codecs = none_throws(a2ai.codecs)
            sharded_grad_input = codecs.backward.decode(
                sharded_grad_input, myreq.qcomm_ctx
            )
        myreq.req = None
        myreq.tensor = None
        myreq.dummy_tensor = None
        # Undo the forward permutation so gradients line up with the input.
        if permuted_lengths_after_sparse_data_all2all is not None:
            with record_function("## alltoall_seq_embedding_bwd_permute ##"):
                if not variable_batch_size:
                    _, sharded_grad_input, _ = torch.ops.fbgemm.permute_2D_sparse_data(
                        backward_recat_tensor,
                        permuted_lengths_after_sparse_data_all2all,
                        sharded_grad_input,
                        None,
                        sharded_grad_input.numel(),
                    )
                else:
                    _, sharded_grad_input, _ = torch.ops.fbgemm.permute_1D_sparse_data(
                        backward_recat_tensor,
                        permuted_lengths_after_sparse_data_all2all,
                        sharded_grad_input,
                        None,
                        sharded_grad_input.numel(),
                    )
        # GRADIENT_DIVISION is a module-level flag defined elsewhere in this
        # file; when set, gradients are averaged across ranks.
        if GRADIENT_DIVISION:
            sharded_grad_input.div_(dist.get_world_size(ctx.pg))
        return (None, None, None, sharded_grad_input.view(-1, D))
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): upstream declares this class as a @dataclass; the decorator
    # may have been stripped here -- confirm before relying on a generated
    # __init__.
    # Codec applied to tensors exchanged in the forward pass.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
    # Codec applied to gradient tensors in the backward pass.
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
    """
    Abstract handle for a deferred result of type ``W``.

    Subclasses implement :meth:`_wait_impl`; callers use :meth:`wait`, which
    additionally passes the awaited value through every registered callback.
    """

    def __init__(self) -> None:
        # Callbacks applied, in registration order, to the awaited value.
        self._callbacks: List[Callable[[W], W]] = []

    @abc.abstractmethod
    def _wait_impl(self) -> W:
        """Produce the underlying result; implemented by subclasses."""
        pass

    def wait(self) -> W:
        """
        Block until the result is ready and return it, after applying every
        registered callback.
        """
        with record_function(f"## {self.__class__.__name__} wait() ##"):
            ret: W = self._wait_impl()
            # ``callbacks`` must be a property: iterating a bound method
            # object would raise TypeError here.
            for callback in self.callbacks:
                ret = callback(ret)
            return ret

    @property
    def callbacks(self) -> List[Callable[[W], W]]:
        return self._callbacks
class NoWait(Awaitable[W]):
    """
    Trivial ``Awaitable`` around a value that is already available;
    ``wait()`` hands it back immediately.
    """

    def __init__(self, obj: W) -> None:
        super().__init__()
        self._result = obj

    def _wait_impl(self) -> W:
        # Nothing is in flight; return the stored value as-is.
        return self._result
The provided code snippet includes necessary dependencies for implementing the `alltoall_sequence` function. Write a Python function `def alltoall_sequence( # (T, B, L_i * D) flattened a2a_sequence_embs_tensor: Tensor, forward_recat_tensor: Tensor, backward_recat_tensor: Tensor, lengths_after_sparse_data_all2all: Tensor, input_splits: List[int], output_splits: List[int], variable_batch_size: bool = False, group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Performs AlltoAll operation for sequence embeddings. Each process splits the input tensor based on the world size, and then scatters the split list to all processes in the group. Then concatenates the received tensors from all processes in the group and returns a single output tensor. NOTE: AlltoAll operator for Sequence embedding tensors. Does not support mixed dimensions. Args: a2a_sequence_embs_tensor (Tensor): input embeddings. forward_recat_tensor (Tensor): recat tensor for forward. backward_recat_tensor (Tensor): recat tensor for backward. lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after AlltoAll. input_splits (List[int]): input splits. output_splits (List[int]): output splits. variable_batch_size (bool): whether variable batch size is enabled. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[List[Tensor]]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting tensor. .. warning:: `alltoall_sequence` is experimental and subject to change.
Here is the function:
def alltoall_sequence(
    # (T, B, L_i * D) flattened
    a2a_sequence_embs_tensor: Tensor,
    forward_recat_tensor: Tensor,
    backward_recat_tensor: Tensor,
    lengths_after_sparse_data_all2all: Tensor,
    input_splits: List[int],
    output_splits: List[int],
    variable_batch_size: bool = False,
    group: Optional[dist.ProcessGroup] = None,
    codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
    """
    Performs AlltoAll operation for sequence embeddings. Each process splits the input
    tensor based on the world size, and then scatters the split list to all processes in
    the group. Then concatenates the received tensors from all processes in the group
    and returns a single output tensor.

    NOTE:
        AlltoAll operator for Sequence embedding tensors.
        Does not support mixed dimensions.

    Args:
        a2a_sequence_embs_tensor (Tensor): input embeddings.
        forward_recat_tensor (Tensor): recat tensor for forward.
        backward_recat_tensor (Tensor): recat tensor for backward.
        lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
            AlltoAll.
        input_splits (List[int]): input splits.
        output_splits (List[int]): output splits.
        variable_batch_size (bool): whether variable batch size is enabled.
        group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
            default process group will be used.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.

    Returns:
        Awaitable[Tensor]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting tensor.

    .. warning::
        `alltoall_sequence` is experimental and subject to change.
    """
    if group is None:
        group = dist.distributed_c10d._get_default_group()
    # Single-rank groups have nothing to exchange.
    if dist.get_world_size(group) <= 1:
        return NoWait(a2a_sequence_embs_tensor)
    a2ai = All2AllSequenceInfo(
        embedding_dim=a2a_sequence_embs_tensor.shape[1],
        lengths_after_sparse_data_all2all=lengths_after_sparse_data_all2all,
        forward_recat_tensor=forward_recat_tensor,
        backward_recat_tensor=backward_recat_tensor,
        input_splits=input_splits,
        output_splits=output_splits,
        variable_batch_size=variable_batch_size,
        codecs=codecs,
    )
    # sequence of embeddings, bags are definitely non-uniform
    if is_torchdynamo_compiling():
        return NoWait(all2all_sequence_sync(group, a2ai, a2a_sequence_embs_tensor))
    myreq = Request(group, device=a2a_sequence_embs_tensor.device)
    All2All_Seq_Req.apply(group, myreq, a2ai, a2a_sequence_embs_tensor)
    return myreq
from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
# Prefer the real torchdynamo probe when available; otherwise fall back to a
# stub that always reports "not compiling".
try:
    from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:

    def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
        """Fallback used when ``torch._dynamo`` cannot be imported."""
        return False
class Request(Awaitable[W]):
    """
    Defines a collective operation request for a process group on a tensor.

    A CommOp-Req autograd function stores the in-flight ``dist.Work`` handle,
    the output buffer, and per-op metadata on this object; calling ``wait()``
    runs the matching CommOp-Await autograd function to finish the collective.

    Args:
        pg (dist.ProcessGroup): The process group the request is for.
        device (torch.device): device on which the autograd-graph dummy tensor
            is allocated.
    """

    def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
        super().__init__()
        self.pg: dist.ProcessGroup = pg
        # Async work handle of the in-flight collective; cleared after wait().
        self.req: Optional[dist.Work] = None
        # Output (forward) or gradient (backward) buffer of the collective.
        self.tensor: Optional[W] = None
        # Per-op metadata slots, populated by the specific CommOp-Req function.
        self.a2ai = None  # type: ignore
        self.qcomm_ctx = None  # type: ignore
        self.rsi = None  # type: ignore
        self.agi = None  # type: ignore
        # Autograd function class used to await this request (set by the Req op).
        self.wait_function = None  # type: ignore
        # This dummy tensor is used to build the autograd graph between
        # CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
        # are stored in self.tensor
        self.dummy_tensor: torch.Tensor = torch.empty(
            1,
            requires_grad=True,
            device=device,
        )

    def _wait_impl(self) -> W:
        """
        Calls the wait function for this request.

        Returns:
            W: the awaited result produced by ``wait_function``.
        """
        ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
        # Drop references so the work handle and buffers can be freed promptly.
        self.req = None
        self.tensor = None
        return ret
# @dataclass restored: `field(default_factory=list)` and the kwargs
# construction in `alltoallv` both require the dataclass machinery.
@dataclass
class All2AllVInfo(object):
    """
    The data class that collects the attributes when calling the `alltoallv` operation.

    Attributes:
        dims_sum_per_rank (List[int]): number of features (sum of dimensions) of the
            embedding in each rank.
        B_global (int): global batch size for each rank.
        B_local (int): local batch size before scattering.
        B_local_list: (List[int]): local batch sizes for each embedding table locally
            (in my current rank).
        D_local_list (List[int]): embedding dimension of each embedding table locally
            (in my current rank).
        input_split_sizes (List[int]): The input split sizes for each rank, this
            remembers how to split the input when doing the `all_to_all_single` operation.
        output_split_sizes (List[int]): The output split sizes for each rank, this
            remembers how to fill the output when doing the `all_to_all_single` operation.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
    """

    dims_sum_per_rank: List[int]
    B_global: int
    B_local: int
    B_local_list: List[int]
    D_local_list: List[int]
    input_split_sizes: List[int] = field(default_factory=list)
    output_split_sizes: List[int] = field(default_factory=list)
    codecs: Optional[QuantizedCommCodecs] = None
def _get_split_lengths_by_len(
world_size: int, my_rank: int, n: int
) -> Tuple[int, List[int]]:
k = n // world_size
m = n % world_size
splits = []
if m == 0:
for _ in range(world_size):
splits.append(k)
my_len = k
else:
for i in range(world_size):
splits.append((k + 1) if i < m else k)
my_len = splits[my_rank]
return (my_len, splits)
def all2allv_sync(
    pg: dist.ProcessGroup,
    a2ai: All2AllVInfo,
    inputs: List[Tensor],
) -> List[Tensor]:
    """
    Synchronous (torch.compile-friendly) variable-dimension AlltoAll.

    Concatenates and flattens the per-table inputs, optionally encodes them,
    runs a functional ``all_to_all_single`` over ``pg``, decodes, and splits
    the result back into one ``(B_local, -1)`` tensor per source rank.

    Args:
        pg (dist.ProcessGroup): process group to exchange over.
        a2ai (All2AllVInfo): batch/dimension metadata for the exchange.
        inputs (List[Tensor]): per-table local embedding tensors.

    Returns:
        List[Tensor]: one received tensor per rank, each of shape
        ``(B_local, -1)``.
    """
    total_local_dim = sum(a2ai.D_local_list)
    input_split_sizes = [b * total_local_dim for b in a2ai.B_local_list]
    output_split_sizes = [a2ai.B_local * d for d in a2ai.dims_sum_per_rank]

    flat_input = torch.cat(inputs, dim=1).view([-1])
    if a2ai.codecs is not None:
        flat_input = a2ai.codecs.forward.encode(flat_input)

    with record_function("## alltoallv_bwd_single ##"):
        flat_output = dist._functional_collectives.all_to_all_single(
            flat_input,
            output_split_sizes=output_split_sizes,
            input_split_sizes=input_split_sizes,
            group=pg,
        )

    if a2ai.codecs is not None:
        flat_output = a2ai.codecs.forward.decode(flat_output)
    return [
        chunk.view([a2ai.B_local, -1])
        for chunk in flat_output.split(output_split_sizes)
    ]
class All2Allv_Req(Function):
    """
    Autograd function that launches the asynchronous variable-dimension
    AlltoAll over a list of per-table embedding tensors.

    ``forward`` flattens and optionally encodes the inputs, issues an async
    ``all_to_all_single``, and parks the work handle and output buffer on
    ``myreq``; the paired ``All2Allv_Wait`` function completes the exchange.
    ``backward`` waits on the gradient AlltoAll issued by the Wait op and
    splits the gradient back into one contiguous tensor per input.
    """

    @staticmethod
    # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
    def forward(
        # pyre-fixme[2]: Parameter must be annotated.
        ctx,
        pg: dist.ProcessGroup,
        myreq: Request[Tensor],
        a2ai: All2AllVInfo,
        inputs: List[Tensor],
    ) -> Tensor:
        input_split_sizes = [m * sum(a2ai.D_local_list) for m in a2ai.B_local_list]
        output_split_sizes = [a2ai.B_local * e for e in a2ai.dims_sum_per_rank]
        input = torch.cat(inputs, dim=1).view([-1])
        if a2ai.codecs is not None:
            input = a2ai.codecs.forward.encode(input)
        output = input.new_empty(sum(output_split_sizes))
        with record_function("## alltoallv_bwd_single ##"):
            req = dist.all_to_all_single(
                output,
                input,
                output_split_sizes,
                input_split_sizes,
                group=pg,
                async_op=True,
            )
        # Park the async state on the request; the Wait op picks it up.
        myreq.req = req
        myreq.tensor = output
        myreq.wait_function = All2Allv_Wait
        a2ai.input_split_sizes = input_split_sizes
        a2ai.output_split_sizes = output_split_sizes
        myreq.a2ai = a2ai
        ctx.a2ai = a2ai
        ctx.myreq = myreq
        ctx.tensor = output
        # Only the dummy tensor flows through autograd; real data rides on myreq.
        return myreq.dummy_tensor

    @staticmethod
    # pyre-fixme[3]: Return type must be annotated.
    # pyre-fixme[2]: Parameter must be annotated.
    def backward(ctx, *grad_output):
        a2ai = ctx.a2ai
        myreq = ctx.myreq
        myreq.req.wait()
        myreq.req = None
        grad_input = myreq.tensor
        if a2ai.codecs is not None:
            grad_input = a2ai.codecs.backward.decode(grad_input)
        # Split the flat gradient back into one chunk per input tensor.
        grad_inputs = grad_input.view([a2ai.B_global, -1]).split(
            a2ai.D_local_list, dim=1
        )
        grad_inputs = [gin.contiguous() for gin in grad_inputs]
        myreq.tensor = None
        myreq.dummy_tensor = None
        return (None, None, None, *grad_inputs)
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): upstream declares this class as a @dataclass; the decorator
    # may have been stripped here -- confirm before relying on a generated
    # __init__.
    # Codec applied to tensors exchanged in the forward pass.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
    # Codec applied to gradient tensors in the backward pass.
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
    """
    Abstract handle for a deferred result of type ``W``.

    Subclasses implement :meth:`_wait_impl`; callers use :meth:`wait`, which
    additionally passes the awaited value through every registered callback.
    """

    def __init__(self) -> None:
        # Callbacks applied, in registration order, to the awaited value.
        self._callbacks: List[Callable[[W], W]] = []

    @abc.abstractmethod
    def _wait_impl(self) -> W:
        """Produce the underlying result; implemented by subclasses."""
        pass

    def wait(self) -> W:
        """
        Block until the result is ready and return it, after applying every
        registered callback.
        """
        with record_function(f"## {self.__class__.__name__} wait() ##"):
            ret: W = self._wait_impl()
            # ``callbacks`` must be a property: iterating a bound method
            # object would raise TypeError here.
            for callback in self.callbacks:
                ret = callback(ret)
            return ret

    @property
    def callbacks(self) -> List[Callable[[W], W]]:
        return self._callbacks
class NoWait(Awaitable[W]):
    """
    Trivial ``Awaitable`` around a value that is already available;
    ``wait()`` hands it back immediately.
    """

    def __init__(self, obj: W) -> None:
        super().__init__()
        self._result = obj

    def _wait_impl(self) -> W:
        # Nothing is in flight; return the stored value as-is.
        return self._result
The provided code snippet includes necessary dependencies for implementing the `alltoallv` function. Write a Python function `def alltoallv( inputs: List[Tensor], out_split: Optional[List[int]] = None, per_rank_split_lengths: Optional[List[int]] = None, group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[List[Tensor]]` to solve the following problem:
Performs `alltoallv` operation for a list of input embeddings. Each process scatters the list to all processes in the group. Args: inputs (List[Tensor]): list of tensors to scatter, one per rank. The tensors in the list usually have different lengths. out_split (Optional[List[int]]): output split sizes (or dim_sum_per_rank), if not specified, we will use `per_rank_split_lengths` to construct a output split with the assumption that all the embs have the same dimension. per_rank_split_lengths (Optional[List[int]]): split lengths per rank. If not specified, the `out_split` must be specified. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[List[Tensor]]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting list of tensors. .. warning:: `alltoallv` is experimental and subject to change.
Here is the function:
def alltoallv(
inputs: List[Tensor],
out_split: Optional[List[int]] = None,
per_rank_split_lengths: Optional[List[int]] = None,
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[List[Tensor]]:
"""
Performs `alltoallv` operation for a list of input embeddings. Each process scatters
the list to all processes in the group.
Args:
inputs (List[Tensor]): list of tensors to scatter, one per rank. The tensors in
the list usually have different lengths.
out_split (Optional[List[int]]): output split sizes (or dim_sum_per_rank), if
not specified, we will use `per_rank_split_lengths` to construct a output
split with the assumption that all the embs have the same dimension.
per_rank_split_lengths (Optional[List[int]]): split lengths per rank. If not
specified, the `out_split` must be specified.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
Returns:
Awaitable[List[Tensor]]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting list of tensors.
.. warning::
`alltoallv` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
world_size: int = group.size()
my_rank: int = group.rank()
B_global = inputs[0].size(0)
D_local_list = []
for e in inputs:
D_local_list.append(e.size()[1])
B_local, B_local_list = _get_split_lengths_by_len(world_size, my_rank, B_global)
if out_split is not None:
dims_sum_per_rank = out_split
elif per_rank_split_lengths is not None:
# all the embs have the same dimension
dims_sum_per_rank = []
for s in per_rank_split_lengths:
dims_sum_per_rank.append(s * D_local_list[0])
else:
raise RuntimeError("Need to specify either out_split or per_rank_split_lengths")
a2ai = All2AllVInfo(
dims_sum_per_rank=dims_sum_per_rank,
B_local=B_local,
B_local_list=B_local_list,
D_local_list=D_local_list,
B_global=B_global,
codecs=codecs,
)
if is_torchdynamo_compiling():
return NoWait(all2allv_sync(group, a2ai, inputs))
myreq = Request(group, device=inputs[0].device)
All2Allv_Req.apply(group, myreq, a2ai, inputs)
return myreq | Performs `alltoallv` operation for a list of input embeddings. Each process scatters the list to all processes in the group. Args: inputs (List[Tensor]): list of tensors to scatter, one per rank. The tensors in the list usually have different lengths. out_split (Optional[List[int]]): output split sizes (or dim_sum_per_rank), if not specified, we will use `per_rank_split_lengths` to construct a output split with the assumption that all the embs have the same dimension. per_rank_split_lengths (Optional[List[int]]): split lengths per rank. If not specified, the `out_split` must be specified. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[List[Tensor]]: async work handle (`Awaitable`), which can be `wait()` later to get the resulting list of tensors. .. warning:: `alltoallv` is experimental and subject to change. |
9,003 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.qcomm_ctx = None # type: ignore
self.rsi = None # type: ignore
self.agi = None # type: ignore
self.wait_function = None # type: ignore
# This dummy tensor is used to build the autograd graph between
# CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
# are stored in self.tensor
self.dummy_tensor: torch.Tensor = torch.empty(
1,
requires_grad=True,
device=device,
)
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
self.req = None
self.tensor = None
return ret
class ReduceScatterInfo(object):
"""
The data class that collects the attributes when calling the `reduce_scatter_pooled`
operation.
Attributes:
input_sizes (List[torch.Size]): the sizes of the input tensors. This remembers the
sizes of the input tensors when running the backward pass and producing the
gradient.
"""
input_sizes: List[torch.Size]
codecs: Optional[QuantizedCommCodecs] = None
def reduce_scatter_sync(
pg: dist.ProcessGroup,
rsi: ReduceScatterInfo,
*inputs: Any,
) -> Tensor:
if rsi.codecs is not None:
# pyre-ignore
inputs = [rsi.codecs.forward.encode(input) for input in inputs]
with record_function("## reduce_scatter ##"):
output = dist._functional_collectives.reduce_scatter_tensor(
torch.cat(inputs),
reduceOp="sum",
scatter_dim=0,
group=pg,
)
if rsi.codecs is not None:
output = rsi.codecs.forward.decode(output)
return output
class ReduceScatter_Req(Function):
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
rsi: ReduceScatterInfo,
*inputs: Any,
) -> Tensor:
my_rank = dist.get_rank(pg)
if rsi.codecs is not None:
# pyre-ignore
inputs = [rsi.codecs.forward.encode(input) for input in inputs]
output = inputs[my_rank].new_empty(
inputs[my_rank].size(),
dtype=inputs[my_rank].dtype,
device=inputs[my_rank].device,
)
with record_function("## reduce_scatter ##"):
req = dist.reduce_scatter(
output,
list(inputs),
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatter_Wait
myreq.rsi = rsi
ctx.myreq = myreq
ctx.pg = pg
ctx.tensor = output
return myreq.dummy_tensor
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
assert myreq.req is not None
myreq.req.wait()
myreq.req = None
grad_inputs = list(myreq.tensor)
rsi = myreq.rsi
if rsi.codecs is not None:
grad_inputs = [
rsi.codecs.backward.decode(grad_input) for grad_input in grad_inputs
]
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
for grad_input in grad_inputs:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor
return (None, None, None, *grad_inputs)
class QuantizedCommCodecs:
"""
The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
"""
# pyre-ignore
forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
# pyre-ignore
backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
def __init__(self) -> None:
self._callbacks: List[Callable[[W], W]] = []
def _wait_impl(self) -> W:
pass
def wait(self) -> W:
with record_function(f"## {self.__class__.__name__} wait() ##"):
ret: W = self._wait_impl()
for callback in self.callbacks:
ret = callback(ret)
return ret
def callbacks(self) -> List[Callable[[W], W]]:
return self._callbacks
class NoWait(Awaitable[W]):
def __init__(self, obj: W) -> None:
super().__init__()
self._obj = obj
def _wait_impl(self) -> W:
return self._obj
The provided code snippet includes necessary dependencies for implementing the `reduce_scatter_pooled` function. Write a Python function `def reduce_scatter_pooled( inputs: List[Tensor], group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Performs reduce-scatter operation for a pooled embeddings tensor split into world size number of chunks. The result of the reduce operation gets scattered to all processes in the group. Args: inputs (List[Tensor]): list of tensors to scatter, one per rank. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_pooled` is experimental and subject to change.
Here is the function:
def reduce_scatter_pooled(
inputs: List[Tensor],
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
"""
Performs reduce-scatter operation for a pooled embeddings tensor split into world
size number of chunks. The result of the reduce operation gets scattered to all
processes in the group.
Args:
inputs (List[Tensor]): list of tensors to scatter, one per rank.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
Returns:
Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor.
.. warning::
`reduce_scatter_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if group.size() <= 1:
return NoWait(inputs[group.rank()])
rsi = ReduceScatterInfo(
input_sizes=[tensor.size() for tensor in inputs], codecs=codecs
)
if is_torchdynamo_compiling():
return NoWait(reduce_scatter_sync(group, rsi, *inputs))
myreq = Request(group, device=inputs[0].device)
ReduceScatter_Req.apply(group, myreq, rsi, *inputs)
return myreq | Performs reduce-scatter operation for a pooled embeddings tensor split into world size number of chunks. The result of the reduce operation gets scattered to all processes in the group. Args: inputs (List[Tensor]): list of tensors to scatter, one per rank. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_pooled` is experimental and subject to change. |
9,004 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.qcomm_ctx = None # type: ignore
self.rsi = None # type: ignore
self.agi = None # type: ignore
self.wait_function = None # type: ignore
# This dummy tensor is used to build the autograd graph between
# CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
# are stored in self.tensor
self.dummy_tensor: torch.Tensor = torch.empty(
1,
requires_grad=True,
device=device,
)
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
self.req = None
self.tensor = None
return ret
class ReduceScatterBaseInfo(object):
"""
The data class that collects the attributes when calling the
`reduce_scatter_base_pooled` operation.
Attributes:
input_sizes (torch.Size): the sizes of the input flatten tensor.
"""
input_sizes: torch.Size
codecs: Optional[QuantizedCommCodecs] = None
def reduce_scatter_base_sync(
pg: dist.ProcessGroup,
rsi: ReduceScatterBaseInfo,
inputs: Tensor,
) -> Tensor:
my_size = pg.size()
assert inputs.size(0) % my_size == 0
if rsi.codecs is not None:
inputs = rsi.codecs.forward.encode(inputs)
with record_function("## reduce_scatter_base ##"):
output = dist._functional_collectives.reduce_scatter_tensor(
inputs,
reduceOp="sum",
scatter_dim=0,
group=pg,
)
if rsi.codecs is not None:
output = rsi.codecs.forward.decode(output)
return output
class ReduceScatterBase_Req(Function):
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
rsi: ReduceScatterBaseInfo,
inputs: Tensor,
) -> Tensor:
my_size = dist.get_world_size(pg)
assert inputs.size(0) % my_size == 0
if rsi.codecs is not None:
inputs = rsi.codecs.forward.encode(inputs)
output = inputs.new_empty((inputs.size(0) // my_size, inputs.size(1)))
with record_function("## reduce_scatter_base ##"):
req = dist._reduce_scatter_base(
output,
inputs,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatterBase_Wait
myreq.rsi = rsi
myreq.tensor = output
ctx.myreq = myreq
ctx.pg = pg
return myreq.dummy_tensor
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_inputs = myreq.tensor
rsi = myreq.rsi
if rsi.codecs is not None:
grad_inputs = rsi.codecs.backward.decode(grad_inputs)
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
grad_inputs.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor = None
return (None, None, None, grad_inputs)
class QuantizedCommCodecs:
"""
The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
"""
# pyre-ignore
forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
# pyre-ignore
backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
def __init__(self) -> None:
self._callbacks: List[Callable[[W], W]] = []
def _wait_impl(self) -> W:
pass
def wait(self) -> W:
with record_function(f"## {self.__class__.__name__} wait() ##"):
ret: W = self._wait_impl()
for callback in self.callbacks:
ret = callback(ret)
return ret
def callbacks(self) -> List[Callable[[W], W]]:
return self._callbacks
class NoWait(Awaitable[W]):
def __init__(self, obj: W) -> None:
super().__init__()
self._obj = obj
def _wait_impl(self) -> W:
return self._obj
The provided code snippet includes necessary dependencies for implementing the `reduce_scatter_base_pooled` function. Write a Python function `def reduce_scatter_base_pooled( input: Tensor, group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Reduces then scatters a flattened pooled embeddings tensor to all processes in a group. Input tensor is of size `output_tensor_size * world_size`. Args: input (Tensor): flattened tensor to scatter. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_base_pooled` is experimental and subject to change.
Here is the function:
def reduce_scatter_base_pooled(
input: Tensor,
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
"""
Reduces then scatters a flattened pooled embeddings tensor to all processes in a
group.
Input tensor is of size `output_tensor_size * world_size`.
Args:
input (Tensor): flattened tensor to scatter.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.
Returns:
Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor.
.. warning::
`reduce_scatter_base_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(input)
rsi = ReduceScatterBaseInfo(input_sizes=input.size(), codecs=codecs)
if is_torchdynamo_compiling():
return NoWait(reduce_scatter_base_sync(group, rsi, input))
myreq = Request(group, device=input.device)
ReduceScatterBase_Req.apply(group, myreq, rsi, input)
return myreq | Reduces then scatters a flattened pooled embeddings tensor to all processes in a group. Input tensor is of size `output_tensor_size * world_size`. Args: input (Tensor): flattened tensor to scatter. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_base_pooled` is experimental and subject to change. |
9,005 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.qcomm_ctx = None # type: ignore
self.rsi = None # type: ignore
self.agi = None # type: ignore
self.wait_function = None # type: ignore
# This dummy tensor is used to build the autograd graph between
# CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
# are stored in self.tensor
self.dummy_tensor: torch.Tensor = torch.empty(
1,
requires_grad=True,
device=device,
)
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
self.req = None
self.tensor = None
return ret
class AllGatherBaseInfo(object):
"""
The data class that collects the attributes when calling the
`all_gatther_base_pooled` operation.
Attributes:
input_size (int): the size of the input tensor.
"""
input_size: torch.Size
codecs: Optional[QuantizedCommCodecs] = None
def all_gather_base_sync(
pg: dist.ProcessGroup,
agi: AllGatherBaseInfo,
input: Tensor,
) -> Tensor:
if agi.codecs is not None:
input = agi.codecs.forward.encode(input)
with record_function("## all_gather_base ##"):
outputs = dist._functional_collectives.all_gather_tensor(
input,
gather_dim=0,
group=pg,
)
if agi.codecs is not None:
outputs = agi.codecs.forward.decode(outputs)
return outputs
class AllGatherBase_Req(Function):
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
agi: AllGatherBaseInfo,
input: Tensor,
) -> Tensor:
my_size = dist.get_world_size(pg)
if agi.codecs is not None:
input = agi.codecs.forward.encode(input)
outputs = input.new_empty((input.size(0) * my_size, input.size(1)))
with record_function("## all_gather_base ##"):
req = dist._all_gather_base(
outputs,
input,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = outputs
myreq.wait_function = AllGatherBase_Wait
myreq.agi = agi
ctx.myreq = myreq
ctx.pg = pg
return myreq.dummy_tensor
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
assert myreq.req is not None
myreq.req.wait()
myreq.req = None
agi = myreq.agi
grad_input = myreq.tensor
if agi.codecs is not None:
grad_input = agi.codecs.backward.decode(grad_input)
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor = None
return (None, None, None, grad_input)
class QuantizedCommCodecs:
"""
The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
"""
# pyre-ignore
forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
# pyre-ignore
backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
def __init__(self) -> None:
self._callbacks: List[Callable[[W], W]] = []
def _wait_impl(self) -> W:
pass
def wait(self) -> W:
with record_function(f"## {self.__class__.__name__} wait() ##"):
ret: W = self._wait_impl()
for callback in self.callbacks:
ret = callback(ret)
return ret
def callbacks(self) -> List[Callable[[W], W]]:
return self._callbacks
class NoWait(Awaitable[W]):
def __init__(self, obj: W) -> None:
super().__init__()
self._obj = obj
def _wait_impl(self) -> W:
return self._obj
The provided code snippet includes necessary dependencies for implementing the `all_gather_base_pooled` function. Write a Python function `def all_gather_base_pooled( input: Tensor, group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
All-gathers tensors from all processes in a group to form a flattened pooled embeddings tensor. Input tensor is of size `output_tensor_size / world_size`. Args: input (Tensor): tensor to gather. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `all_gather_base_pooled` is experimental and subject to change.
Here is the function:
def all_gather_base_pooled(
input: Tensor,
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
"""
All-gathers tensors from all processes in a group to form a flattened pooled
embeddings tensor.
Input tensor is of size `output_tensor_size / world_size`.
Args:
input (Tensor): tensor to gather.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
Returns:
Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor.
.. warning::
`all_gather_base_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
agi = AllGatherBaseInfo(input_size=input.size(), codecs=codecs)
if dist.get_world_size(group) <= 1:
return NoWait(input)
if is_torchdynamo_compiling():
return NoWait(all_gather_base_sync(group, agi, input))
myreq = Request(group, device=input.device)
AllGatherBase_Req.apply(group, myreq, agi, input)
return myreq | All-gathers tensors from all processes in a group to form a flattened pooled embeddings tensor. Input tensor is of size `output_tensor_size / world_size`. Args: input (Tensor): tensor to gather. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `all_gather_base_pooled` is experimental and subject to change. |
9,006 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.qcomm_ctx = None # type: ignore
self.rsi = None # type: ignore
self.agi = None # type: ignore
self.wait_function = None # type: ignore
# This dummy tensor is used to build the autograd graph between
# CommOp-Req and CommOp-Await. The actual forward tensors, and backwards gradient tensors
# are stored in self.tensor
self.dummy_tensor: torch.Tensor = torch.empty(
1,
requires_grad=True,
device=device,
)
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.dummy_tensor)
self.req = None
self.tensor = None
return ret
class ReduceScatterVInfo(object):
"""
The data class that collects the attributes when calling the `reduce_scatter_v_pooled`
operation.
Attributes:
input_sizes (List[List[int]]): the sizes of the input tensors. This saves the
sizes of the input tensors when running the backward pass and producing the
gradient.
input_splits (List[int]): the splits of the input tensors along dim 0.
equal_splits (bool): ...
total_input_size: (List[int]): total input size.
codecs (Optional[QuantizedCommCodecs]): ...
"""
input_sizes: List[List[int]]
input_splits: List[int]
equal_splits: bool
total_input_size: List[int]
codecs: Optional[QuantizedCommCodecs]
def reduce_scatter_v_sync(
pg: dist.ProcessGroup,
rsi: ReduceScatterVInfo,
input: Tensor,
) -> Tensor:
world_size = pg.size()
rank = pg.rank()
if rsi.codecs is not None:
input = rsi.codecs.forward.encode(input)
if rsi.equal_splits:
with record_function("## reduce_scatter_base ##"):
output = dist._functional_collectives.reduce_scatter_tensor(
input,
reduceOp="sum",
scatter_dim=0,
group=pg,
)
else:
with record_function("## reduce_scatter_v_via_all_to_all_single ##"):
input_splits = rsi.input_splits
output_splits = [rsi.input_splits[rank]] * world_size
# TODO(ivankobzarev): Replace with _functional_collectives.reduce_scatter_v when it is added
a2a_output = dist._functional_collectives.all_to_all_single(
input,
output_splits,
input_splits,
pg,
)
output = torch.sum(
torch.stack(torch.split(a2a_output, output_splits)), dim=0
)
if rsi.codecs is not None:
output = rsi.codecs.forward.decode(output)
return output
class ReduceScatterV_Req(Function):
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
rsi: ReduceScatterVInfo,
input: Tensor,
) -> Tensor:
my_rank = dist.get_rank(pg)
if rsi.codecs is not None:
input = rsi.codecs.forward.encode(input)
output = input.new_empty(rsi.input_sizes[my_rank])
# Use dist._reduce_scatter_base when a vector reduce-scatter is not needed
# else use dist.reduce_scatter which internally supports vector reduce-scatter
if rsi.equal_splits:
with record_function("## reduce_scatter_base ##"):
req = dist._reduce_scatter_base(
output,
input,
group=pg,
async_op=True,
)
else:
with record_function("## reduce_scatter_v ##"):
req = dist.reduce_scatter(
output,
list(torch.split(input, rsi.input_splits)),
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatterV_Wait
myreq.rsi = rsi
ctx.myreq = myreq
ctx.pg = pg
return myreq.dummy_tensor
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
assert myreq.req is not None
myreq.req.wait()
myreq.req = None
grad_input = myreq.tensor
rsi = myreq.rsi
if rsi.codecs is not None:
grad_input = rsi.codecs.backward.decode(grad_input)
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
myreq.dummy_tensor = None
return (None, None, None, grad_input)
class QuantizedCommCodecs:
"""
The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
"""
# pyre-ignore
forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
# pyre-ignore
backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
def __init__(self) -> None:
self._callbacks: List[Callable[[W], W]] = []
def _wait_impl(self) -> W:
pass
def wait(self) -> W:
with record_function(f"## {self.__class__.__name__} wait() ##"):
ret: W = self._wait_impl()
for callback in self.callbacks:
ret = callback(ret)
return ret
def callbacks(self) -> List[Callable[[W], W]]:
return self._callbacks
class NoWait(Awaitable[W]):
def __init__(self, obj: W) -> None:
super().__init__()
self._obj = obj
def _wait_impl(self) -> W:
return self._obj
The provided code snippet includes necessary dependencies for implementing the `reduce_scatter_v_pooled` function. Write a Python function `def reduce_scatter_v_pooled( input: Tensor, input_splits: List[int], group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Performs reduce-scatter-v operation for a pooled embeddings tensor split unevenly into world size number of chunks. The result of the reduce operation gets scattered to all processes in the group according to `input_splits`. Args: input (Tensor): tensor to scatter. input_splits (List[int]): input splits. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_v_pooled` is experimental and subject to change.
Here is the function:
def reduce_scatter_v_pooled(
input: Tensor,
input_splits: List[int],
group: Optional[dist.ProcessGroup] = None,
codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
"""
Performs reduce-scatter-v operation for a pooled embeddings tensor split unevenly
into world size number of chunks. The result of the reduce operation gets scattered
to all processes in the group according to `input_splits`.
Args:
input (Tensor): tensor to scatter.
input_splits (List[int]): input splits.
group (Optional[dist.ProcessGroup]): the process group to work on. If None, the
default process group will be used.
Returns:
Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor.
.. warning::
`reduce_scatter_v_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(input)
input_size = list(input.size())
input_sizes = [
[ip_split if d == 0 else input_size[d] for d in range(len(input_size))]
for ip_split in input_splits
]
equal_splits = all(ip_split == input_splits[0] for ip_split in input_splits)
rsvi = ReduceScatterVInfo(
input_sizes=input_sizes,
input_splits=input_splits,
equal_splits=equal_splits,
total_input_size=input_size,
codecs=codecs,
)
if is_torchdynamo_compiling():
return NoWait(reduce_scatter_v_sync(group, rsvi, input))
myreq = Request(group, device=input.device)
ReduceScatterV_Req.apply(group, myreq, rsvi, input)
return myreq | Performs reduce-scatter-v operation for a pooled embeddings tensor split unevenly into world size number of chunks. The result of the reduce operation gets scattered to all processes in the group according to `input_splits`. Args: input (Tensor): tensor to scatter. input_splits (List[int]): input splits. group (Optional[dist.ProcessGroup]): the process group to work on. If None, the default process group will be used. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_v_pooled` is experimental and subject to change. |
9,007 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
# Detect torchdynamo tracing when torch._dynamo is available; otherwise fall
# back to a stub that always reports "not compiling" (older torch builds).
try:
    from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
    def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
        return False
class Request(Awaitable[W]):
    """In-flight collective operation on a process group.

    A ``Request`` is populated by a CommOp's ``*_Req`` autograd function (work
    handle, output tensor, op-specific info) and completed by the matching
    ``*_Wait`` function via :meth:`_wait_impl`.

    Args:
        pg (dist.ProcessGroup): The process group the request is for.
    """

    def __init__(self, pg: dist.ProcessGroup, device: torch.device) -> None:
        super().__init__()
        self.pg: dist.ProcextssGroup = pg
        # Pending async work and its output; cleared once waited on.
        self.req: Optional[dist.Work] = None
        self.tensor: Optional[W] = None
        # Op-specific context slots, filled in by whichever CommOp owns this request.
        self.a2ai = None  # type: ignore
        self.qcomm_ctx = None  # type: ignore
        self.rsi = None  # type: ignore
        self.agi = None  # type: ignore
        self.wait_function = None  # type: ignore
        # This dummy tensor only ties the CommOp-Req node to the CommOp-Await
        # node in the autograd graph; the actual forward tensors and backward
        # gradients travel through self.tensor.
        self.dummy_tensor: torch.Tensor = torch.empty(
            1,
            requires_grad=True,
            device=device,
        )

    def _wait_impl(self) -> W:
        """Run this request's wait function and return the finished result."""
        result = self.wait_function.apply(self.pg, self, self.dummy_tensor)
        self.req = None
        self.tensor = None
        return result
@dataclass
class ReduceScatterVInfo(object):
    """
    The data class that collects the attributes when calling the `reduce_scatter_v_pooled`
    operation.

    Attributes:
        input_sizes (List[List[int]]): the sizes of the input tensors. This saves the
            sizes of the input tensors when running the backward pass and producing the
            gradient.
        input_splits (List[int]): the splits of the input tensors along dim 0.
        equal_splits (bool): True when every rank's split has the same size, which
            enables the plain (non-"v") reduce-scatter path.
        total_input_size: (List[int]): total input size.
        codecs (Optional[QuantizedCommCodecs]): optional codecs used to
            encode/decode the communicated tensors.
    """

    # NOTE: @dataclass is required — callers construct this type with keyword
    # arguments (see reduce_scatter_v_pooled / reduce_scatter_v_per_feature_pooled);
    # without the generated __init__ those calls raise TypeError.
    input_sizes: List[List[int]]
    input_splits: List[int]
    equal_splits: bool
    total_input_size: List[int]
    codecs: Optional[QuantizedCommCodecs]
def reduce_scatter_v_sync(
    pg: dist.ProcessGroup,
    rsi: ReduceScatterVInfo,
    input: Tensor,
) -> Tensor:
    """Synchronous reduce-scatter-v via functional collectives (torchdynamo path).

    Sums ``input`` across all ranks of ``pg`` and returns this rank's slice,
    using the split layout captured in ``rsi``. When ``rsi.codecs`` is set, the
    payload is encoded before the collective and the result decoded afterwards.
    """
    world_size = pg.size()
    rank = pg.rank()
    if rsi.codecs is not None:
        input = rsi.codecs.forward.encode(input)
    if rsi.equal_splits:
        # Every rank contributes an equal-sized chunk: a plain reduce-scatter suffices.
        with record_function("## reduce_scatter_base ##"):
            output = dist._functional_collectives.reduce_scatter_tensor(
                input,
                reduceOp="sum",
                scatter_dim=0,
                group=pg,
            )
    else:
        # Uneven splits: emulate reduce-scatter-v with an all-to-all followed by a
        # local sum over the world_size received chunks (each of this rank's size).
        with record_function("## reduce_scatter_v_via_all_to_all_single ##"):
            input_splits = rsi.input_splits
            output_splits = [rsi.input_splits[rank]] * world_size
            # TODO(ivankobzarev): Replace with _functional_collectives.reduce_scatter_v when it is added
            a2a_output = dist._functional_collectives.all_to_all_single(
                input,
                output_splits,
                input_splits,
                pg,
            )
            output = torch.sum(
                torch.stack(torch.split(a2a_output, output_splits)), dim=0
            )
    if rsi.codecs is not None:
        output = rsi.codecs.forward.decode(output)
    return output
class ReduceScatterV_Req(Function):
    """Launch half of the async reduce-scatter-v collective.

    ``forward`` kicks off the (possibly uneven) reduce-scatter asynchronously
    and parks the in-flight work plus its output on ``myreq``; the matching
    ``ReduceScatterV_Wait`` completes it. The returned dummy tensor only ties
    the Req and Wait nodes together in the autograd graph — the real data
    travels through ``myreq.tensor``.
    """

    # autograd.Function requires static forward/backward; the decorators were
    # missing here although forward takes no `self`.
    @staticmethod
    # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
    def forward(
        # pyre-fixme[2]: Parameter must be annotated.
        ctx,
        pg: dist.ProcessGroup,
        myreq: Request[Tensor],
        rsi: ReduceScatterVInfo,
        input: Tensor,
    ) -> Tensor:
        my_rank = dist.get_rank(pg)
        if rsi.codecs is not None:
            input = rsi.codecs.forward.encode(input)
        output = input.new_empty(rsi.input_sizes[my_rank])

        # Use dist._reduce_scatter_base when a vector reduce-scatter is not needed
        # else use dist.reduce_scatter which internally supports vector reduce-scatter
        if rsi.equal_splits:
            with record_function("## reduce_scatter_base ##"):
                req = dist._reduce_scatter_base(
                    output,
                    input,
                    group=pg,
                    async_op=True,
                )
        else:
            with record_function("## reduce_scatter_v ##"):
                req = dist.reduce_scatter(
                    output,
                    list(torch.split(input, rsi.input_splits)),
                    group=pg,
                    async_op=True,
                )

        # Hand the async work to the Wait node via the shared Request object.
        myreq.req = req
        myreq.tensor = output
        myreq.wait_function = ReduceScatterV_Wait
        myreq.rsi = rsi
        ctx.myreq = myreq
        ctx.pg = pg
        return myreq.dummy_tensor

    @staticmethod
    # pyre-fixme[2]: Parameter must be annotated.
    def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
        myreq = ctx.myreq
        assert myreq.req is not None
        # The gradient was produced by an async collective launched by the Wait
        # node's backward; finish it before reading myreq.tensor.
        myreq.req.wait()
        myreq.req = None
        grad_input = myreq.tensor
        rsi = myreq.rsi
        if rsi.codecs is not None:
            grad_input = rsi.codecs.backward.decode(grad_input)
        # Make it equivalent to running on a single rank.
        if GRADIENT_DIVISION:
            grad_input.div_(dist.get_world_size(ctx.pg))
        myreq.tensor = None
        myreq.dummy_tensor = None
        # One gradient slot per forward arg: (pg, myreq, rsi, input).
        return (None, None, None, grad_input)
@dataclass
class QuantizedCommCodecs:
    """
    The quantization codecs to use for the forward and backward pass respectively of a comm op (e.g. pooled_all_to_all, reduce_scatter, sequence_all_to_all).
    """

    # NOTE(review): the @dataclass decorator appears to have been stripped from
    # this extract; without it these are plain shared class attributes and the
    # generated keyword __init__ is missing. Restored to match the docstring.
    # pyre-ignore
    forward: QuantizedCommCodec = NoOpQuantizedCommCodec()
    # pyre-ignore
    backward: QuantizedCommCodec = NoOpQuantizedCommCodec()
class Awaitable(abc.ABC, Generic[W]):
    """Handle to the deferred result of an async operation.

    Subclasses implement ``_wait_impl``; callers use :meth:`wait`, which also
    applies any registered result-transforming callbacks in order.
    """

    def __init__(self) -> None:
        self._callbacks: List[Callable[[W], W]] = []

    @abc.abstractmethod
    def _wait_impl(self) -> W:
        """Produce the result (implemented by concrete awaitables)."""
        pass

    def wait(self) -> W:
        """Block until the result is ready, run callbacks, and return it."""
        with record_function(f"## {self.__class__.__name__} wait() ##"):
            ret: W = self._wait_impl()
            for callback in self.callbacks:
                ret = callback(ret)
        return ret

    # @property is required here: wait() iterates `self.callbacks` directly, so
    # without it the for-loop would try to iterate a bound method and fail.
    @property
    def callbacks(self) -> List[Callable[[W], W]]:
        return self._callbacks
class NoWait(Awaitable[W]):
    """Awaitable wrapping a value that is already available.

    ``wait()`` returns the wrapped object immediately without blocking.
    """

    def __init__(self, obj: W) -> None:
        super().__init__()
        self._obj = obj

    def _wait_impl(self) -> W:
        """Hand back the wrapped object; nothing to wait for."""
        return self._obj
The provided code snippet includes necessary dependencies for implementing the `reduce_scatter_v_per_feature_pooled` function. Write a Python function `def reduce_scatter_v_per_feature_pooled( input: Tensor, batch_size_per_rank_per_feature: List[List[int]], embedding_dims: List[int], group: Optional[dist.ProcessGroup] = None, codecs: Optional[QuantizedCommCodecs] = None, ) -> Awaitable[Tensor]` to solve the following problem:
Performs reduce-scatter-v operation for a 1-d pooled embeddings tensor of variable batch size per feature split unevenly into world size number of chunks. The result of the reduce operation gets scattered to all processes in the group. Args: input (Tensor): tensors to scatter, one per rank. batch_size_per_rank_per_feature (List[List[int]]): batch size per rank per feature used to determine input splits. embedding_dims (List[int]): embedding dimensions per feature used to determine input splits. group (Optional[dist.ProcessGroup]): The process group to work on. If None, the default process group will be used. codecs (Optional[QuantizedCommCodecs]): quantized communication codecs. Returns: Awaitable[Tensor]: async work handle (Awaitable), which can be `wait()` later to get the resulting tensor. .. warning:: `reduce_scatter_v_per_feature_pooled` is experimental and subject to change.
Here is the function:
def reduce_scatter_v_per_feature_pooled(
    input: Tensor,
    batch_size_per_rank_per_feature: List[List[int]],
    embedding_dims: List[int],
    group: Optional[dist.ProcessGroup] = None,
    codecs: Optional[QuantizedCommCodecs] = None,
) -> Awaitable[Tensor]:
    """
    Reduce-scatter-v for a 1-d pooled embeddings tensor with variable batch size
    per feature. The tensor is split unevenly into world-size chunks, summed
    across ranks, and each rank receives its own chunk of the result.

    Args:
        input (Tensor): tensor to scatter.
        batch_size_per_rank_per_feature (List[List[int]]): per-rank, per-feature
            batch sizes used to derive the input splits.
        embedding_dims (List[int]): per-feature embedding dimensions used to
            derive the input splits.
        group (Optional[dist.ProcessGroup]): the process group to work on. If
            None, the default process group will be used.
        codecs (Optional[QuantizedCommCodecs]): quantized communication codecs.

    Returns:
        Awaitable[Tensor]: async work handle; call `wait()` to obtain the tensor.

    .. warning::
        `reduce_scatter_v_per_feature_pooled` is experimental and subject to change.
    """
    if group is None:
        group = dist.distributed_c10d._get_default_group()

    world_size = group.size()
    if world_size <= 1:
        return NoWait(input)

    # Each rank's split length is the sum over features of batch_size * emb_dim.
    if batch_size_per_rank_per_feature:
        input_splits = [
            sum(
                batch_size * emb_dim
                for batch_size, emb_dim in zip(
                    batch_size_per_rank_per_feature[rank], embedding_dims
                )
            )
            for rank in range(world_size)
        ]
    else:
        input_splits = [0] * world_size

    rsvi = ReduceScatterVInfo(
        input_sizes=[[split] for split in input_splits],
        input_splits=input_splits,
        equal_splits=False,
        total_input_size=list(input.size()),
        codecs=codecs,
    )

    # Under torchdynamo we must use the synchronous functional-collectives path.
    if is_torchdynamo_compiling():
        return NoWait(reduce_scatter_v_sync(group, rsvi, input))

    request = Request(group, device=input.device)
    ReduceScatterV_Req.apply(group, request, rsvi, input)
    return request
9,008 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
if not torch._running_with_deploy():
for ns, op_to_autograd_fn in zip(
["c10d_functional", "_c10d_functional"],
[c10d_functional_autograd_ops, c10d_functional_autograd_ops_native_funcols],
):
c10_lib_impl = torch.library.Library(ns, "IMPL")
backend = "Autograd"
for op_name, fn in op_to_autograd_fn:
if not torch._C._dispatch_has_kernel_for_dispatch_key(
f"{ns}::{op_name}", backend
):
c10_lib_impl.impl(op_name, fn, backend)
def _recat_pooled_embedding_grad_out(
    grad_output: Tensor, num_features_per_rank: List[int]
) -> Tensor:
    """Re-layout a pooled-embedding gradient for the backward all-to-all.

    Splits ``grad_output`` along dim 1 into one chunk per rank (chunk ``r`` has
    ``num_features_per_rank[r]`` columns), flattens each chunk, and concatenates
    the flattened chunks into a single 1-d tensor.
    """
    flattened_chunks = []
    for per_rank_chunk in grad_output.split(num_features_per_rank, dim=1):
        flattened_chunks.append(per_rank_chunk.contiguous().view(-1))
    return torch.cat(flattened_chunks, dim=0)
9,009 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
if not torch._running_with_deploy():
for ns, op_to_autograd_fn in zip(
["c10d_functional", "_c10d_functional"],
[c10d_functional_autograd_ops, c10d_functional_autograd_ops_native_funcols],
):
c10_lib_impl = torch.library.Library(ns, "IMPL")
backend = "Autograd"
for op_name, fn in op_to_autograd_fn:
if not torch._C._dispatch_has_kernel_for_dispatch_key(
f"{ns}::{op_name}", backend
):
c10_lib_impl.impl(op_name, fn, backend)
def _recat_seq_embedding(
    input_embeddings: Tensor,
    split_sizes: List[int],
    T_local: int,
    my_size: int,
    forward: bool,
) -> Tensor:
    """Permute sequence-embedding chunks between rank-major and table-major order.

    ``input_embeddings`` is split along dim 0 by ``split_sizes`` into
    ``T_local * my_size`` chunks, which are then concatenated in permuted order:
    forward maps chunk ``(t, i)`` from index ``t * my_size + i``; backward (the
    inverse) maps from index ``i * T_local + t``.
    """
    chunks = input_embeddings.split(split_sizes)
    if forward:
        order = [
            t * my_size + i for i in range(my_size) for t in range(T_local)
        ]
    else:
        order = [
            i * T_local + t for t in range(T_local) for i in range(my_size)
        ]
    return torch.cat([chunks[idx] for idx in order], dim=0)
9,010 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
# NOTE(review): this extract appears truncated — the forward/backward bodies
# (and, presumably, @staticmethod decorators) are missing; confirm against the
# original source before relying on this fragment.
class _All2AllSingle(torch.autograd.Function):
    def forward(
        ctx, # pyre-ignore
        input: torch.Tensor,
        output_split_sizes: List[int],
        input_split_sizes: List[int],
        tag: str,
        ranks: List[int],
        group_size: int,
    ) -> torch.Tensor:
        """Autograd forward for all_to_all_single (body not present in this extract)."""
    def backward(ctx, grad_output):
        """Autograd backward (body not present in this extract)."""
if not torch._running_with_deploy():
for ns, op_to_autograd_fn in zip(
["c10d_functional", "_c10d_functional"],
[c10d_functional_autograd_ops, c10d_functional_autograd_ops_native_funcols],
):
c10_lib_impl = torch.library.Library(ns, "IMPL")
backend = "Autograd"
for op_name, fn in op_to_autograd_fn:
if not torch._C._dispatch_has_kernel_for_dispatch_key(
f"{ns}::{op_name}", backend
):
c10_lib_impl.impl(op_name, fn, backend)
def _all_to_all_single_autograd(
    input: torch.Tensor,
    output_split_sizes: List[int],
    input_split_sizes: List[int],
    tag: str,
    ranks: List[int],
    group_size: int,
) -> torch.Tensor:
    """Route ``all_to_all_single`` through the autograd-aware ``_All2AllSingle``."""
    return _All2AllSingle.apply(
        input,
        output_split_sizes,
        input_split_sizes,
        tag,
        ranks,
        group_size,
    )
9,011 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
# NOTE(review): this extract appears truncated — the forward/backward bodies
# (and, presumably, @staticmethod decorators) are missing; confirm against the
# original source before relying on this fragment.
class _All2AllSingle_native_funcol(torch.autograd.Function):
    def forward(
        ctx, # pyre-ignore
        input: torch.Tensor,
        output_split_sizes: List[int],
        input_split_sizes: List[int],
        group_name: str,
    ) -> torch.Tensor:
        """Autograd forward for the native-funcol all_to_all_single (body not present in this extract)."""
    def backward(ctx, grad_output):
        """Autograd backward (body not present in this extract)."""
if not torch._running_with_deploy():
for ns, op_to_autograd_fn in zip(
["c10d_functional", "_c10d_functional"],
[c10d_functional_autograd_ops, c10d_functional_autograd_ops_native_funcols],
):
c10_lib_impl = torch.library.Library(ns, "IMPL")
backend = "Autograd"
for op_name, fn in op_to_autograd_fn:
if not torch._C._dispatch_has_kernel_for_dispatch_key(
f"{ns}::{op_name}", backend
):
c10_lib_impl.impl(op_name, fn, backend)
def _all_to_all_single_autograd_native_funcol(
    input: torch.Tensor,
    output_split_sizes: List[int],
    input_split_sizes: List[int],
    group_name: str,
) -> torch.Tensor:
    """Route the native-funcol ``all_to_all_single`` through its autograd Function."""
    return _All2AllSingle_native_funcol.apply(
        input,
        output_split_sizes,
        input_split_sizes,
        group_name,
    )
9,012 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
# NOTE(review): this extract appears truncated — the forward/backward bodies
# (and, presumably, @staticmethod decorators) are missing; confirm against the
# original source before relying on this fragment.
class _ReduceScatterTensor(torch.autograd.Function):
    def forward(
        ctx, # pyre-ignore
        input: torch.Tensor,
        reduceOp: str,
        tag: str,
        ranks: List[int],
        group_size: int,
    ) -> torch.Tensor:
        """Autograd forward for reduce_scatter_tensor (body not present in this extract)."""
    def backward(ctx, grad_output):
        """Autograd backward (body not present in this extract)."""
def _reduce_scatter_tensor_autograd(input, reduceOp, tag, ranks, group_size):
    """Route ``reduce_scatter_tensor`` through the autograd-aware ``_ReduceScatterTensor``."""
    return _ReduceScatterTensor.apply(
        input,
        reduceOp,
        tag,
        ranks,
        group_size,
    )
9,013 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
class _ReduceScatterTensor_native_funcol(torch.autograd.Function):
    """Reduce-scatter via the native ``_c10d_functional`` op with a hand-written
    backward (all-gather of the incoming gradient), so gradients flow through
    the collective.
    """

    # autograd.Function requires static forward/backward; the decorators were
    # missing here although forward takes no `self`.
    @staticmethod
    # pyre-ignore
    def forward(
        ctx,  # pyre-ignore
        input: torch.Tensor,
        reduceOp: str,
        group_size: int,
        group_name: str,
    ) -> torch.Tensor:
        input = input.contiguous()
        # Stash the group info for the backward all-gather.
        ctx.group_size = group_size
        ctx.group_name = group_name
        with torch._C._AutoDispatchBelowAutograd():
            ret = torch.ops._c10d_functional.reduce_scatter_tensor(
                input,
                reduceOp,
                group_size,
                group_name,
            )
        return ret

    @staticmethod
    def backward(ctx, grad_output):  # pyre-ignore
        grad_output = grad_output.contiguous()
        # Gradient of reduce-scatter is an all-gather of grad_output; None slots
        # correspond to the three non-tensor forward args.
        return (
            torch.ops._c10d_functional.all_gather_into_tensor(
                grad_output, ctx.group_size, ctx.group_name
            ),
            None,
            None,
            None,
        )
def _reduce_scatter_tensor_autograd_native_funcol(
    input,  # pyre-ignore
    reduceOp,  # pyre-ignore
    group_size,  # pyre-ignore
    group_name,  # pyre-ignore
):
    """Route the native-funcol ``reduce_scatter_tensor`` through its autograd Function."""
    return _ReduceScatterTensor_native_funcol.apply(
        input, reduceOp, group_size, group_name
    )
9,014 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
# NOTE(review): this extract appears truncated — the forward/backward bodies
# (and, presumably, @staticmethod decorators) are missing; confirm against the
# original source before relying on this fragment.
class _AllGatherIntoTensor(torch.autograd.Function):
    def forward(
        ctx, # pyre-ignore
        shard: torch.Tensor,
        tag: str,
        ranks: List[int],
        group_size: int,
    ) -> torch.Tensor:
        """Autograd forward for all_gather_into_tensor (body not present in this extract)."""
    def backward(ctx, grad_output):
        """Autograd backward (body not present in this extract)."""
def _all_gather_into_tensor_autograd(shard, tag, ranks, group_size):
    """Route ``all_gather_into_tensor`` through the autograd-aware ``_AllGatherIntoTensor``."""
    return _AllGatherIntoTensor.apply(
        shard,
        tag,
        ranks,
        group_size,
    )
9,015 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
class _AllGatherIntoTensor_native_funcols(torch.autograd.Function):
    """All-gather via the native ``_c10d_functional`` op with a hand-written
    backward (sum reduce-scatter of the incoming gradient), so gradients flow
    through the collective.
    """

    # autograd.Function requires static forward/backward; the decorators were
    # missing here although forward takes no `self`.
    @staticmethod
    # pyre-ignore
    def forward(
        ctx,  # pyre-ignore
        shard: torch.Tensor,
        group_size: int,
        group_name: str,
    ) -> torch.Tensor:
        shard = shard.contiguous()
        # Stash the group info for the backward reduce-scatter.
        ctx.group_size = group_size
        ctx.group_name = group_name
        with torch._C._AutoDispatchBelowAutograd():
            ret = torch.ops._c10d_functional.all_gather_into_tensor(
                shard, group_size, group_name
            )
        return ret

    @staticmethod
    def backward(ctx, grad_output):  # pyre-ignore
        # Gradient of all-gather is a sum reduce-scatter of grad_output; None
        # slots correspond to the two non-tensor forward args.
        return (
            torch.ops._c10d_functional.reduce_scatter_tensor(
                grad_output, "sum", ctx.group_size, ctx.group_name
            ),
            None,
            None,
        )
def _all_gather_into_tensor_autograd_native_funcol(shard, group_size, group_name):
    """Route the native-funcol ``all_gather_into_tensor`` through its autograd Function."""
    return _AllGatherIntoTensor_native_funcols.apply(
        shard,
        group_size,
        group_name,
    )
9,016 | from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple, TypeVar
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait, QuantizedCommCodecs
from torchrec.distributed.utils import none_throws
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class _Wait(torch.autograd.Function):
    """Wait on an async collective result while keeping the autograd chain
    intact: the forward completes the tensor, the backward passes the gradient
    through unchanged (identity).
    """

    # autograd.Function requires static forward/backward; the decorators were
    # missing here although forward takes no `self`.
    @staticmethod
    # pyre-ignore
    def forward(
        ctx,  # pyre-ignore
        input: torch.Tensor,
    ) -> torch.Tensor:
        with torch._C._AutoDispatchBelowAutograd():
            ret = torch.ops.c10d_functional.wait_tensor(input)
        return ret

    @staticmethod
    def backward(ctx, grad_output):  # pyre-ignore
        # wait is the identity for gradient purposes.
        return (grad_output,)
if not torch._running_with_deploy():
for ns, op_to_autograd_fn in zip(
["c10d_functional", "_c10d_functional"],
[c10d_functional_autograd_ops, c10d_functional_autograd_ops_native_funcols],
):
c10_lib_impl = torch.library.Library(ns, "IMPL")
backend = "Autograd"
for op_name, fn in op_to_autograd_fn:
if not torch._C._dispatch_has_kernel_for_dispatch_key(
f"{ns}::{op_name}", backend
):
c10_lib_impl.impl(op_name, fn, backend)
def _wait_autograd(input: torch.Tensor) -> torch.Tensor:
    """Wait on an async collective output without breaking the autograd chain."""
    waited = _Wait.apply(input)
    return waited
9,017 | import abc
import copy
import itertools
from dataclasses import dataclass
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
DenseTableBatchedEmbeddingBagsCodegen,
EmbeddingLocation,
PoolingMode,
SparseType,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding, get_state_dict
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.types import (
Shard,
ShardedTensor,
ShardedTensorMetadata,
ShardMetadata,
TensorProperties,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
pooling_type_to_pooling_mode,
)
from torchrec.optim.fused import (
EmptyFusedOptimizer,
FusedOptimizer,
FusedOptimizerModule,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class EmbeddingFusedOptimizer(FusedOptimizer):
    """Fused optimizer exposing a TBE module's internal optimizer state.

    Wraps a ``SplitTableBatchedEmbeddingBagsCodegen`` (which applies its
    optimizer inside the backward pass) so that weights and optimizer states
    appear as ShardedTensors in ``state_dict``/``param_groups`` like a
    non-fused optimizer would, and so step/LR changes propagate to the kernel.
    """

    def __init__(  # noqa C901
        self,
        config: GroupedEmbeddingConfig,
        emb_module: SplitTableBatchedEmbeddingBagsCodegen,
        pg: Optional[dist.ProcessGroup] = None,
        create_for_table: Optional[str] = None,
        param_weight_for_table: Optional[nn.Parameter] = None,
    ) -> None:
        """
        Implementation of a FusedOptimizer. Designed as a base class Embedding kernels

        create_for_table is an optional flag, which if passed in only creates the optimizer for a single table.
        This optimizer shares data with the broader optimizer (one per embedding kernel)
        and is used to share step and LR changes
        """
        self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = emb_module
        self._pg = pg

        # @dataclass is required: ShardParams is constructed below with keyword
        # arguments and has no hand-written __init__.
        @dataclass
        class ShardParams:
            optimizer_states: List[Optional[Tuple[torch.Tensor]]]
            local_metadata: List[ShardMetadata]
            embedding_weights: List[torch.Tensor]

        def get_optimizer_rowwise_shard_metadata_and_global_metadata(
            table_global_metadata: ShardedTensorMetadata,
            optimizer_state: torch.Tensor,
            sharding_dim: int,
        ) -> Tuple[Dict[ShardMetadata, ShardMetadata], ShardedTensorMetadata]:
            # Build per-shard and global metadata for a 1-d (row-wise) optimizer
            # state such as a row-wise momentum.
            table_global_shards_metadata: List[ShardMetadata] = (
                table_global_metadata.shards_metadata
            )

            # column-wise sharding
            # sort the metadata based on column offset and
            # we construct the momentum tensor in row-wise sharded way
            if sharding_dim == 1:
                table_global_shards_metadata = sorted(
                    table_global_shards_metadata,
                    key=lambda shard: shard.shard_offsets[1],
                )

            table_shard_metadata_to_optimizer_shard_metadata = {}

            for idx, table_shard_metadata in enumerate(table_global_shards_metadata):
                offset = table_shard_metadata.shard_offsets[0]
                # for column-wise sharding, we still create row-wise sharded metadata for optimizer
                # manually create a row-wise offset
                if sharding_dim == 1:
                    offset = idx * table_shard_metadata.shard_sizes[0]

                table_shard_metadata_to_optimizer_shard_metadata[
                    table_shard_metadata
                ] = ShardMetadata(
                    shard_sizes=[table_shard_metadata.shard_sizes[0]],
                    shard_offsets=[offset],
                    placement=table_shard_metadata.placement,
                )

            tensor_properties = TensorProperties(
                dtype=optimizer_state.dtype,
                layout=optimizer_state.layout,
                requires_grad=False,
            )
            len_rw_shards = (
                len(table_shard_metadata_to_optimizer_shard_metadata)
                if sharding_dim == 1
                else 1
            )
            rowwise_optimizer_st_metadata = ShardedTensorMetadata(
                shards_metadata=list(
                    table_shard_metadata_to_optimizer_shard_metadata.values()
                ),
                size=torch.Size([table_global_metadata.size[0] * len_rw_shards]),
                tensor_properties=tensor_properties,
            )

            return (
                table_shard_metadata_to_optimizer_shard_metadata,
                rowwise_optimizer_st_metadata,
            )

        def get_optimizer_pointwise_shard_metadata_and_global_metadata(
            table_global_metadata: ShardedTensorMetadata,
            optimizer_state: torch.Tensor,
        ) -> Tuple[Dict[ShardMetadata, ShardMetadata], ShardedTensorMetadata]:
            # Build per-shard and global metadata for a pointwise optimizer
            # state that mirrors the weight's own sharding exactly.
            table_global_shards_metadata: List[ShardMetadata] = (
                table_global_metadata.shards_metadata
            )

            table_shard_metadata_to_optimizer_shard_metadata = {}

            for table_shard_metadata in table_global_shards_metadata:
                table_shard_metadata_to_optimizer_shard_metadata[
                    table_shard_metadata
                ] = ShardMetadata(
                    shard_sizes=table_shard_metadata.shard_sizes,
                    shard_offsets=table_shard_metadata.shard_offsets,
                    placement=table_shard_metadata.placement,
                )

            tensor_properties = TensorProperties(
                dtype=optimizer_state.dtype,
                layout=optimizer_state.layout,
                requires_grad=False,
            )
            pointwise_optimizer_st_metadata = ShardedTensorMetadata(
                shards_metadata=list(
                    table_shard_metadata_to_optimizer_shard_metadata.values()
                ),
                size=table_global_metadata.size,
                tensor_properties=tensor_properties,
            )

            return (
                table_shard_metadata_to_optimizer_shard_metadata,
                pointwise_optimizer_st_metadata,
            )

        # pyre-ignore [33]
        state: Dict[Any, Any] = {}
        param_group: Dict[str, Any] = {
            "params": [],
            "lr": emb_module.optimizer_args.learning_rate,
        }

        params: Dict[str, Union[torch.Tensor, ShardedTensor]] = {}

        # Fused optimizers use buffers (they don't use autograd) and we want to make sure
        # that state_dict look identical to no-fused version.
        table_to_shard_params: Dict[str, ShardParams] = {}

        embedding_weights_by_table = emb_module.split_embedding_weights()

        all_optimizer_states = emb_module.get_optimizer_state()
        optimizer_states_keys_by_table: Dict[str, List[torch.Tensor]] = {}

        # Collect per-table shards (weights, metadata, optimizer states).
        for (
            table_config,
            optimizer_states,
            weight,
        ) in itertools.zip_longest(
            config.embedding_tables,
            all_optimizer_states,
            embedding_weights_by_table,
        ):
            # When EmbeddingFusedOptimizer is created for composability, only create state
            if create_for_table is not None and create_for_table != table_config.name:
                continue
            if table_config.name not in table_to_shard_params:
                table_to_shard_params[table_config.name] = ShardParams(
                    optimizer_states=[], local_metadata=[], embedding_weights=[]
                )
            optimizer_state_values = None
            if optimizer_states:
                optimizer_state_values = tuple(optimizer_states.values())
                for optimizer_state_value in optimizer_state_values:
                    assert table_config.local_rows == optimizer_state_value.size(0)
                optimizer_states_keys_by_table[table_config.name] = list(
                    optimizer_states.keys()
                )
            local_metadata = table_config.local_metadata

            table_to_shard_params[table_config.name].optimizer_states.append(
                optimizer_state_values
            )
            table_to_shard_params[table_config.name].local_metadata.append(
                local_metadata
            )
            table_to_shard_params[table_config.name].embedding_weights.append(weight)

        seen_tables = set()
        for table_config in config.embedding_tables:
            if create_for_table is not None and create_for_table != table_config.name:
                continue
            if table_config.name in seen_tables:
                continue
            seen_tables.add(table_config.name)
            table_config_global_metadata: Optional[ShardedTensorMetadata] = (
                copy.deepcopy(table_config.global_metadata)
            )

            shard_params: ShardParams = table_to_shard_params[table_config.name]

            assert table_config_global_metadata is not None
            if create_for_table is None:
                local_weight_shards = []
                for local_weight, local_metadata in zip(
                    shard_params.embedding_weights, shard_params.local_metadata
                ):
                    local_weight_shards.append(Shard(local_weight, local_metadata))
                    table_config_global_metadata.tensor_properties.dtype = (
                        local_weight.dtype
                    )
                    table_config_global_metadata.tensor_properties.requires_grad = (
                        local_weight.requires_grad
                    )
                # TODO share this logic to create the same TableBatchedEmbeddingSlice in FusedModules below
                weight = ShardedTensor._init_from_local_shards_and_global_metadata(
                    local_shards=local_weight_shards,
                    sharded_tensor_metadata=table_config_global_metadata,
                    process_group=self._pg,
                )
                param_key = table_config.name + ".weight"
            else:
                assert (
                    param_weight_for_table is not None
                ), "param_weight_for_table cannot be None when using create_for_table"
                weight = param_weight_for_table
                param_key = ""

            state[weight] = {}
            param_group["params"].append(weight)
            params[param_key] = weight

            # Setting optimizer states
            sharding_dim: int = (
                1 if table_config.local_cols != table_config.embedding_dim else 0
            )

            if all(
                opt_state is not None for opt_state in shard_params.optimizer_states
            ):
                # pyre-ignore
                def get_sharded_optim_state(momentum_idx: int) -> ShardedTensor:
                    assert momentum_idx > 0
                    momentum_local_shards: List[Shard] = []
                    optimizer_sharded_tensor_metadata: ShardedTensorMetadata

                    # A 1-d state is row-wise (one value per row); otherwise it
                    # mirrors the weight shard shapes pointwise.
                    is_rowwise_optimizer_state: bool = (
                        # pyre-ignore
                        shard_params.optimizer_states[0][momentum_idx - 1].dim()
                        == 1
                    )

                    if is_rowwise_optimizer_state:
                        (
                            table_shard_metadata_to_optimizer_shard_metadata,
                            optimizer_sharded_tensor_metadata,
                        ) = get_optimizer_rowwise_shard_metadata_and_global_metadata(
                            table_config.global_metadata,
                            shard_params.optimizer_states[0][momentum_idx - 1],
                            sharding_dim,
                        )
                    else:
                        (
                            table_shard_metadata_to_optimizer_shard_metadata,
                            optimizer_sharded_tensor_metadata,
                        ) = get_optimizer_pointwise_shard_metadata_and_global_metadata(
                            table_config.global_metadata,
                            shard_params.optimizer_states[0][momentum_idx - 1],
                        )

                    for optimizer_state, table_shard_local_metadata in zip(
                        shard_params.optimizer_states, shard_params.local_metadata
                    ):
                        local_optimizer_shard_metadata = (
                            table_shard_metadata_to_optimizer_shard_metadata[
                                table_shard_local_metadata
                            ]
                        )
                        momentum_local_shards.append(
                            Shard(
                                optimizer_state[momentum_idx - 1],
                                local_optimizer_shard_metadata,
                            )
                        )

                    # TODO we should be creating this in SPMD fashion (e.g. init_from_local_shards), and let it derive global metadata.
                    return ShardedTensor._init_from_local_shards_and_global_metadata(
                        local_shards=momentum_local_shards,
                        sharded_tensor_metadata=optimizer_sharded_tensor_metadata,
                        process_group=self._pg,
                    )

                num_states: int = min(
                    # pyre-ignore
                    [len(opt_state) for opt_state in shard_params.optimizer_states]
                )
                optimizer_state_keys = []
                if num_states > 0:
                    optimizer_state_keys = optimizer_states_keys_by_table[
                        table_config.name
                    ]
                for cur_state_idx in range(0, num_states):
                    if cur_state_idx == 0:
                        # for backward compatibility
                        cur_state_key = "momentum1"
                    else:
                        cur_state_key = optimizer_state_keys[cur_state_idx]

                    state[weight][f"{table_config.name}.{cur_state_key}"] = (
                        get_sharded_optim_state(cur_state_idx + 1)
                    )

        super().__init__(params, state, [param_group])

    def zero_grad(self, set_to_none: bool = False) -> None:
        # The TBE applies the optimizer internally; zero_grad only needs to
        # propagate any LR change made on the param group down to the kernel.
        # pyre-ignore [16]
        self._emb_module.set_learning_rate(self.param_groups[0]["lr"])

    # pyre-ignore [2]
    def step(self, closure: Any = None) -> None:
        # The actual update happens fused in the TBE backward; here we only
        # propagate the current LR to the kernel.
        # pyre-ignore [16]
        self._emb_module.set_learning_rate(self.param_groups[0]["lr"])
class TableBatchedEmbeddingSlice(nn.Parameter):
"""
Parameter to represent a slice of a table batched embedding. The slice will be
a view of the TBE of shape (num_embeddings, embedding_dim) and contain consistent .grad
unlike nn.Parameter, requires_grad is not present and follows requires_grad of TBE.data
Args:
data (torch.Tensor): original Data (of a TBE) to make a slice of
start_offset (int):
end_offset (int):
num_embeddings (int):
embedding_dim (int):
"""
__slots__ = [
"_original_tensor",
"_start_offset",
"_end_offset",
"_num_embeddings",
"_embedding_dim",
]
def __init__(
self,
data: torch.Tensor,
start_offset: int,
end_offset: int,
num_embeddings: int,
embedding_dim: int,
) -> None:
super().__init__()
self._original_tensor: torch.Tensor = data
self._start_offset: int = start_offset
self._end_offset: int = end_offset
self._num_embeddings: int = num_embeddings
self._embedding_dim: int = embedding_dim
self._init_grad: Optional[torch.Tensor] = None
if self._original_tensor.requires_grad:
self.retain_grad()
def __new__(
cls,
data: torch.Tensor,
start_offset: int,
end_offset: int,
num_embeddings: int,
embedding_dim: int,
) -> "TableBatchedEmbeddingSlice":
_slice = data[start_offset:end_offset].view(num_embeddings, embedding_dim)
return _slice.as_subclass(cls)
def __deepcopy__(
self, memo: Dict[int, "TableBatchedEmbeddingSlice"]
) -> "TableBatchedEmbeddingSlice":
if id(self) in memo:
return memo[id(self)]
else:
result = TableBatchedEmbeddingSlice(
self._original_tensor.clone(memory_format=torch.preserve_format),
self._start_offset,
self._end_offset,
self._num_embeddings,
self._embedding_dim,
)
memo[id(self)] = result
return result
def grad(self) -> Optional[torch.Tensor]:
if self._original_tensor.grad is None:
return self._init_grad
return self._original_tensor.grad[self._start_offset : self._end_offset].view(
self._num_embeddings, self._embedding_dim
)
def grad(self, set_grad: torch.Tensor) -> None:
self._init_grad = set_grad
if set_grad is None:
self._original_tensor.grad = None
elif self._original_tensor.grad is not None:
self._original_tensor.grad[self._start_offset : self._end_offset].copy_(
set_grad.view(-1)
)
def grad_fn(self) -> None:
# set as leaf node
return None
class GroupedEmbeddingConfig:
data_type: DataType
pooling: PoolingType
is_weighted: bool
has_feature_processor: bool
compute_kernel: EmbeddingComputeKernel
embedding_tables: List[ShardedEmbeddingTable]
fused_params: Optional[Dict[str, Any]] = None
def feature_hash_sizes(self) -> List[int]:
feature_hash_sizes = []
for table in self.embedding_tables:
feature_hash_sizes.extend(table.num_features() * [table.num_embeddings])
return feature_hash_sizes
def num_features(self) -> int:
num_features = 0
for table in self.embedding_tables:
num_features += table.num_features()
return num_features
def dim_sum(self) -> int:
dim_sum = 0
for table in self.embedding_tables:
dim_sum += table.num_features() * table.local_cols
return dim_sum
def table_names(self) -> List[str]:
table_names = []
for table in self.embedding_tables:
table_names.append(table.name)
return table_names
def feature_names(self) -> List[str]:
feature_names = []
for table in self.embedding_tables:
feature_names.extend(table.feature_names)
return feature_names
def embedding_dims(self) -> List[int]:
embedding_dims = []
for table in self.embedding_tables:
embedding_dims.extend([table.local_cols] * table.num_features())
return embedding_dims
def embedding_names(self) -> List[str]:
embedding_names = []
for table in self.embedding_tables:
embedding_names.extend(table.embedding_names)
return embedding_names
def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
embedding_shard_metadata: List[Optional[ShardMetadata]] = []
for table in self.embedding_tables:
for _ in table.feature_names:
embedding_shard_metadata.append(table.local_metadata)
return embedding_shard_metadata
def _gen_named_parameters_by_table_fused(
emb_module: SplitTableBatchedEmbeddingBagsCodegen,
table_name_to_count: Dict[str, int],
config: GroupedEmbeddingConfig,
pg: Optional[dist.ProcessGroup] = None,
) -> Iterator[Tuple[str, TableBatchedEmbeddingSlice]]:
# TODO: move logic to FBGEMM to avoid accessing fbgemm internals
for t_idx, (rows, dim, location, _) in enumerate(emb_module.embedding_specs):
table_name = config.embedding_tables[t_idx].name
if table_name not in table_name_to_count:
continue
table_count = table_name_to_count.pop(table_name)
if emb_module.weights_precision == SparseType.INT8:
dim += emb_module.int8_emb_row_dim_offset
offset = emb_module.weights_physical_offsets[t_idx]
weights: torch.Tensor
if location == EmbeddingLocation.DEVICE.value:
weights = emb_module.weights_dev
elif location == EmbeddingLocation.HOST.value:
weights = emb_module.weights_host
else:
weights = emb_module.weights_uvm
weight = TableBatchedEmbeddingSlice(
data=weights,
start_offset=offset,
end_offset=offset + table_count * rows * dim,
num_embeddings=-1,
embedding_dim=dim,
)
# this reuses logic in EmbeddingFusedOptimizer but is per table
# pyre-ignore
weight._in_backward_optimizers = [
EmbeddingFusedOptimizer(
config=config,
emb_module=emb_module,
pg=pg,
create_for_table=table_name,
param_weight_for_table=weight,
)
]
yield (table_name, weight) | null |
9,018 | import abc
import copy
import itertools
from dataclasses import dataclass
from typing import (
Any,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
DenseTableBatchedEmbeddingBagsCodegen,
EmbeddingLocation,
PoolingMode,
SparseType,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding, get_state_dict
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.types import (
Shard,
ShardedTensor,
ShardedTensorMetadata,
ShardMetadata,
TensorProperties,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
data_type_to_sparse_type,
pooling_type_to_pooling_mode,
)
from torchrec.optim.fused import (
EmptyFusedOptimizer,
FusedOptimizer,
FusedOptimizerModule,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class TableBatchedEmbeddingSlice(nn.Parameter):
def __init__(
self,
data: torch.Tensor,
start_offset: int,
end_offset: int,
num_embeddings: int,
embedding_dim: int,
) -> None:
def __new__(
cls,
data: torch.Tensor,
start_offset: int,
end_offset: int,
num_embeddings: int,
embedding_dim: int,
) -> "TableBatchedEmbeddingSlice":
def __deepcopy__(
self, memo: Dict[int, "TableBatchedEmbeddingSlice"]
) -> "TableBatchedEmbeddingSlice":
def grad(self) -> Optional[torch.Tensor]:
def grad(self, set_grad: torch.Tensor) -> None:
def grad_fn(self) -> None:
class GroupedEmbeddingConfig:
def feature_hash_sizes(self) -> List[int]:
def num_features(self) -> int:
def dim_sum(self) -> int:
def table_names(self) -> List[str]:
def feature_names(self) -> List[str]:
def embedding_dims(self) -> List[int]:
def embedding_names(self) -> List[str]:
def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
def _gen_named_parameters_by_table_dense(
emb_module: DenseTableBatchedEmbeddingBagsCodegen,
table_name_to_count: Dict[str, int],
config: GroupedEmbeddingConfig,
) -> Iterator[Tuple[str, TableBatchedEmbeddingSlice]]:
# TODO: move logic to FBGEMM to avoid accessing fbgemm internals
for t_idx, (rows, dim) in enumerate(emb_module.embedding_specs):
table_name = config.embedding_tables[t_idx].name
if table_name not in table_name_to_count:
continue
table_count = table_name_to_count.pop(table_name)
offset = emb_module.weights_physical_offsets[t_idx]
weight = TableBatchedEmbeddingSlice(
data=emb_module.weights,
start_offset=offset,
end_offset=offset + table_count * rows * dim,
num_embeddings=-1,
embedding_dim=dim,
)
yield (table_name, weight) | null |
9,019 | from typing import Callable, Dict, List, Optional, Type, Union
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed._composable.contract import contract
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.model_parallel import get_default_sharders
from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
from torchrec.distributed.sharding_plan import (
get_module_to_default_sharders,
ParameterShardingGenerator,
)
from torchrec.distributed.types import (
ModuleSharder,
ModuleShardingPlan,
ShardingEnv,
ShardingPlan,
)
from torchrec.distributed.utils import init_parameters
def _shard_modules( # noqa: C901
module: nn.Module,
env: Optional[ShardingEnv] = None,
device: Optional[torch.device] = None,
plan: Optional[ShardingPlan] = None,
sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
init_params: Optional[bool] = False,
) -> nn.Module:
"""
See shard_modules
"""
torch._C._log_api_usage_once("torchrec.distributed.shard_modules")
if sharders is None:
sharders = get_default_sharders()
if env is None:
pg = dist.GroupMember.WORLD
assert pg is not None, "Process group is not initialized"
env = ShardingEnv.from_process_group(pg)
if device is None:
if torch.cuda.is_available():
device = torch.device(torch.cuda.current_device())
else:
device = torch.device("cpu")
sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = {
sharder.module_type: sharder for sharder in sharders
}
if plan is None:
planner = EmbeddingShardingPlanner(
topology=Topology(
local_world_size=get_local_size(env.world_size),
world_size=env.world_size,
compute_device=device.type,
)
)
pg = env.process_group
if pg is not None:
plan = planner.collective_plan(module, sharders, pg)
else:
plan = planner.plan(module, sharders)
if type(module) in sharder_map:
# If the top level module is itself a shardable module, return the sharded variant.
# Note, we cannot do an inplace replacement in this case.
return sharder_map[type(module)].shard(
module, plan.get_plan_for_module(""), env, device
)
def _replace(_model: nn.Module, path: str = "") -> None:
for child_name, child in _model.named_children():
child_path = _join_module_path(path, child_name)
if type(child) in sharder_map:
assert plan is not None
sharded_params = plan.get_plan_for_module(child_path)
if sharded_params is not None:
sharded_module = sharder_map[type(child)].shard(
child, sharded_params, env, device
)
_model.register_module(
child_name,
sharded_module,
)
else:
_replace(child, child_path)
_replace(module)
if init_params and device is not None and device.type != "meta":
init_parameters(module, device)
module = module.to(device)
return module
class ShardingPlan:
"""
Representation of sharding plan. This uses the FQN of the larger wrapped model (i.e the model that is wrapped using `DistributedModelParallel`)
EmbeddingModuleShardingPlan should be used when TorchRec composability is desired.
Attributes:
plan (Dict[str, EmbeddingModuleShardingPlan]): dict keyed by module path of
dict of parameter sharding specs keyed by parameter name.
"""
plan: Dict[str, ModuleShardingPlan]
def get_plan_for_module(self, module_path: str) -> Optional[ModuleShardingPlan]:
"""
Args:
module_path (str):
Returns:
Optional[ModuleShardingPlan]: dict of parameter sharding specs keyed by parameter name. None if sharding specs do not exist for given module_path.
"""
return self.plan.get(module_path, None)
def __str__(self) -> str:
out = ""
for i, (module_path, module_plan) in enumerate(self.plan.items()):
if i > 0:
out += "\n\n"
out += "module: " + module_path
out += str(module_plan)
return out
class ShardingEnv:
"""
Provides an abstraction over `torch.distributed.ProcessGroup`, which practically
enables `DistributedModelParallel` to be used during inference.
"""
def __init__(
self,
world_size: int,
rank: int,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
self.world_size = world_size
self.rank = rank
self.process_group: Optional[dist.ProcessGroup] = pg
def from_process_group(cls, pg: dist.ProcessGroup) -> "ShardingEnv":
"""
Creates ProcessGroup-based sharding environment.
NOTE:
Typically used during training.
"""
return cls(dist.get_world_size(pg), dist.get_rank(pg), pg)
def from_local(cls, world_size: int, rank: int) -> "ShardingEnv":
"""
Creates a local host-based sharding environment.
NOTE:
Typically used during single host inference.
"""
return cls(world_size, rank, None)
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `shard_modules` function. Write a Python function `def shard_modules( module: nn.Module, env: Optional[ShardingEnv] = None, device: Optional[torch.device] = None, plan: Optional[ShardingPlan] = None, sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None, init_params: bool = False, ) -> nn.Module` to solve the following problem:
Replaces all sub_modules that are embedding modules with their sharded variants. This embedding_module -> sharded_embedding_module mapping is derived from the passed in sharders. This will leave the other parts of the model unaffected. It returns the original module Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `get_default_sharders()`. init_params: (Optional[bool]): If ``True``, will materialize parameters and buffers that are on meta device, and will move module to ``device``. Note that this only applies if `device.type != "meta"``. Default: `False`. Example:: @torch.no_grad() def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = shard(m) assert isinstance(m.embedding_bag_collection, ShardedEmbeddingBagCollection)
Here is the function:
def shard_modules(
module: nn.Module,
env: Optional[ShardingEnv] = None,
device: Optional[torch.device] = None,
plan: Optional[ShardingPlan] = None,
sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
init_params: bool = False,
) -> nn.Module:
"""
Replaces all sub_modules that are embedding modules with their sharded variants. This embedding_module -> sharded_embedding_module mapping
is derived from the passed in sharders.
This will leave the other parts of the model unaffected.
It returns the original module
Args:
module (nn.Module): module to wrap.
env (Optional[ShardingEnv]): sharding environment that has the process group.
device (Optional[torch.device]): compute device, defaults to cpu.
plan (Optional[ShardingPlan]): plan to use when sharding, defaults to
`EmbeddingShardingPlanner.collective_plan()`.
sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available
to shard with, defaults to `get_default_sharders()`.
init_params: (Optional[bool]): If ``True``, will materialize parameters and
buffers that are on meta device, and will move module to ``device``. Note that
this only applies if `device.type != "meta"``. Default: `False`.
Example::
@torch.no_grad()
def init_weights(m):
if isinstance(m, nn.Linear):
m.weight.fill_(1.0)
elif isinstance(m, EmbeddingBagCollection):
for param in m.parameters():
init.kaiming_normal_(param)
m = MyModel(device='meta')
m = shard(m)
assert isinstance(m.embedding_bag_collection, ShardedEmbeddingBagCollection)
"""
torch._C._log_api_usage_once("torchrec.distributed.shard_modules")
return _shard_modules(module, env, device, plan, sharders, init_params) | Replaces all sub_modules that are embedding modules with their sharded variants. This embedding_module -> sharded_embedding_module mapping is derived from the passed in sharders. This will leave the other parts of the model unaffected. It returns the original module Args: module (nn.Module): module to wrap. env (Optional[ShardingEnv]): sharding environment that has the process group. device (Optional[torch.device]): compute device, defaults to cpu. plan (Optional[ShardingPlan]): plan to use when sharding, defaults to `EmbeddingShardingPlanner.collective_plan()`. sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available to shard with, defaults to `get_default_sharders()`. init_params: (Optional[bool]): If ``True``, will materialize parameters and buffers that are on meta device, and will move module to ``device``. Note that this only applies if `device.type != "meta"``. Default: `False`. Example:: @torch.no_grad() def init_weights(m): if isinstance(m, nn.Linear): m.weight.fill_(1.0) elif isinstance(m, EmbeddingBagCollection): for param in m.parameters(): init.kaiming_normal_(param) m = MyModel(device='meta') m = shard(m) assert isinstance(m.embedding_bag_collection, ShardedEmbeddingBagCollection) |
9,020 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def _get_parameter_size_offsets(
param: torch.nn.Parameter,
sharding_type: ShardingType,
local_size: int,
world_size: int,
col_wise_shard_dim: Optional[int] = None,
) -> List[Tuple[List[int], List[int]]]:
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=none_throws(param),
world_size=world_size,
local_world_size=local_size,
sharding_type=sharding_type.value,
col_wise_shard_dim=col_wise_shard_dim,
)
return list(zip(shard_sizes, shard_offsets))
def _get_parameter_sharding(
param: nn.Parameter,
sharding_type: str,
size_offset_ranks: List[Tuple[List[int], List[int], int]],
local_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
placements: Optional[List[str]] = None,
compute_kernel: Optional[str] = None,
) -> ParameterSharding:
return ParameterSharding(
sharding_spec=(
None
if sharding_type == ShardingType.DATA_PARALLEL.value
else EnumerableShardingSpec(
[
ShardMetadata(
shard_sizes=size,
shard_offsets=offset,
placement=(
placement(
device_type,
none_throws(rank),
none_throws(local_size),
)
if not device_placement
else device_placement
),
)
for (size, offset, rank), device_placement in zip(
size_offset_ranks,
placements if placements else [None] * len(size_offset_ranks),
)
]
)
),
sharding_type=sharding_type,
compute_kernel=(
compute_kernel
if compute_kernel
else _get_compute_kernel(sharder, param, sharding_type, device_type)
),
ranks=[rank for (_, _, rank) in size_offset_ranks],
)
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
"""
Describes the sharding of the parameter.
sharding_type (str): how this parameter is sharded. See ShardingType for well-known
types.
compute_kernel (str): compute kernel to be used by this parameter.
ranks (Optional[List[int]]): rank of each shard.
sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
cache_params (Optional[CacheParams]): cache params for embedding lookup.
enforce_hbm (Optional[bool]): whether to use HBM.
stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.
NOTE:
ShardingType.TABLE_WISE - rank where this embedding is placed
ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
individual tables
ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
"""
sharding_type: str
compute_kernel: str
ranks: Optional[List[int]] = None
sharding_spec: Optional[ShardingSpec] = None
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `data_parallel` function. Write a Python function `def data_parallel() -> ParameterShardingGenerator` to solve the following problem:
Returns a generator of ParameterShardingPlan for `ShardingType::DATA_PARALLEL` for construct_module_sharding_plan. Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": data_parallel(), }, )
Here is the function:
def data_parallel() -> ParameterShardingGenerator:
"""
Returns a generator of ParameterShardingPlan for `ShardingType::DATA_PARALLEL` for construct_module_sharding_plan.
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_0": data_parallel(),
},
)
"""
def _parameter_sharding_generator(
param: nn.Parameter,
local_size: int,
world_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
) -> ParameterSharding:
size_and_offsets = _get_parameter_size_offsets(
param,
ShardingType.DATA_PARALLEL,
local_size,
world_size,
)
size_offset_ranks = []
assert len(size_and_offsets) == world_size
for (size, offset), rank in zip(size_and_offsets, range(world_size)):
size_offset_ranks.append((size, offset, rank))
return _get_parameter_sharding(
param,
ShardingType.DATA_PARALLEL.value,
size_offset_ranks,
local_size,
device_type,
sharder,
)
return _parameter_sharding_generator | Returns a generator of ParameterShardingPlan for `ShardingType::DATA_PARALLEL` for construct_module_sharding_plan. Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": data_parallel(), }, ) |
9,021 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def _get_parameter_size_offsets(
param: torch.nn.Parameter,
sharding_type: ShardingType,
local_size: int,
world_size: int,
col_wise_shard_dim: Optional[int] = None,
) -> List[Tuple[List[int], List[int]]]:
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=none_throws(param),
world_size=world_size,
local_world_size=local_size,
sharding_type=sharding_type.value,
col_wise_shard_dim=col_wise_shard_dim,
)
return list(zip(shard_sizes, shard_offsets))
def _get_parameter_sharding(
param: nn.Parameter,
sharding_type: str,
size_offset_ranks: List[Tuple[List[int], List[int], int]],
local_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
placements: Optional[List[str]] = None,
compute_kernel: Optional[str] = None,
) -> ParameterSharding:
return ParameterSharding(
sharding_spec=(
None
if sharding_type == ShardingType.DATA_PARALLEL.value
else EnumerableShardingSpec(
[
ShardMetadata(
shard_sizes=size,
shard_offsets=offset,
placement=(
placement(
device_type,
none_throws(rank),
none_throws(local_size),
)
if not device_placement
else device_placement
),
)
for (size, offset, rank), device_placement in zip(
size_offset_ranks,
placements if placements else [None] * len(size_offset_ranks),
)
]
)
),
sharding_type=sharding_type,
compute_kernel=(
compute_kernel
if compute_kernel
else _get_compute_kernel(sharder, param, sharding_type, device_type)
),
ranks=[rank for (_, _, rank) in size_offset_ranks],
)
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
"""
Describes the sharding of the parameter.
sharding_type (str): how this parameter is sharded. See ShardingType for well-known
types.
compute_kernel (str): compute kernel to be used by this parameter.
ranks (Optional[List[int]]): rank of each shard.
sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
cache_params (Optional[CacheParams]): cache params for embedding lookup.
enforce_hbm (Optional[bool]): whether to use HBM.
stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.
NOTE:
ShardingType.TABLE_WISE - rank where this embedding is placed
ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
individual tables
ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
"""
sharding_type: str
compute_kernel: str
ranks: Optional[List[int]] = None
sharding_spec: Optional[ShardingSpec] = None
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `table_wise` function. Write a Python function `def table_wise( rank: int, ) -> ParameterShardingGenerator` to solve the following problem:
Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_WISE` for construct_module_sharding_plan. Args: rank (int): rank to place table when doing table wise Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": table_wise(rank=0), }, )
Here is the function:
def table_wise(
rank: int,
) -> ParameterShardingGenerator:
"""
Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_WISE` for construct_module_sharding_plan.
Args:
rank (int): rank to place table when doing table wise
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_0": table_wise(rank=0),
},
)
"""
def _parameter_sharding_generator(
param: nn.Parameter,
local_size: int,
world_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
) -> ParameterSharding:
size_and_offsets = _get_parameter_size_offsets(
param,
ShardingType.TABLE_WISE,
local_size,
world_size,
)
assert len(size_and_offsets) == 1
(size, offset) = size_and_offsets[0]
size_offset_ranks = [(size, offset, rank)]
return _get_parameter_sharding(
param,
ShardingType.TABLE_WISE.value,
size_offset_ranks,
local_size,
device_type,
sharder,
)
return _parameter_sharding_generator | Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_WISE` for construct_module_sharding_plan. Args: rank (int): rank to place table when doing table wise Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": table_wise(rank=0), }, ) |
9,022 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def _get_parameter_size_offsets(
param: torch.nn.Parameter,
sharding_type: ShardingType,
local_size: int,
world_size: int,
col_wise_shard_dim: Optional[int] = None,
) -> List[Tuple[List[int], List[int]]]:
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=none_throws(param),
world_size=world_size,
local_world_size=local_size,
sharding_type=sharding_type.value,
col_wise_shard_dim=col_wise_shard_dim,
)
return list(zip(shard_sizes, shard_offsets))
def _get_parameter_sharding(
param: nn.Parameter,
sharding_type: str,
size_offset_ranks: List[Tuple[List[int], List[int], int]],
local_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
placements: Optional[List[str]] = None,
compute_kernel: Optional[str] = None,
) -> ParameterSharding:
return ParameterSharding(
sharding_spec=(
None
if sharding_type == ShardingType.DATA_PARALLEL.value
else EnumerableShardingSpec(
[
ShardMetadata(
shard_sizes=size,
shard_offsets=offset,
placement=(
placement(
device_type,
none_throws(rank),
none_throws(local_size),
)
if not device_placement
else device_placement
),
)
for (size, offset, rank), device_placement in zip(
size_offset_ranks,
placements if placements else [None] * len(size_offset_ranks),
)
]
)
),
sharding_type=sharding_type,
compute_kernel=(
compute_kernel
if compute_kernel
else _get_compute_kernel(sharder, param, sharding_type, device_type)
),
ranks=[rank for (_, _, rank) in size_offset_ranks],
)
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class EmbeddingComputeKernel(Enum):
DENSE = "dense"
FUSED = "fused"
FUSED_UVM = "fused_uvm"
FUSED_UVM_CACHING = "fused_uvm_caching"
QUANT = "quant"
QUANT_UVM = "quant_uvm"
QUANT_UVM_CACHING = "quant_uvm_caching"
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
"""
Describes the sharding of the parameter.
sharding_type (str): how this parameter is sharded. See ShardingType for well-known
types.
compute_kernel (str): compute kernel to be used by this parameter.
ranks (Optional[List[int]]): rank of each shard.
sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
cache_params (Optional[CacheParams]): cache params for embedding lookup.
enforce_hbm (Optional[bool]): whether to use HBM.
stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.
NOTE:
ShardingType.TABLE_WISE - rank where this embedding is placed
ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
individual tables
ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
"""
sharding_type: str
compute_kernel: str
ranks: Optional[List[int]] = None
sharding_spec: Optional[ShardingSpec] = None
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `row_wise` function. Write a Python function `def row_wise( sizes_placement: Optional[Tuple[List[int], str]] = None ) -> ParameterShardingGenerator` to solve the following problem:
Returns a generator of ParameterShardingPlan for `ShardingType::ROW_WISE` for construct_module_sharding_plan. Args: sizes_placement (Optional[Tuple[List[int], str]]): Only use it in inference for uneven shardinglist of tuples of (sizes, placement); sizes is the row size list Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_1": row_wise(), "table_2": row_wise([10, 5, 0, 3], "cpu") }, )
Here is the function:
def row_wise(
sizes_placement: Optional[Tuple[List[int], str]] = None
) -> ParameterShardingGenerator:
"""
Returns a generator of ParameterShardingPlan for `ShardingType::ROW_WISE` for construct_module_sharding_plan.
Args:
sizes_placement (Optional[Tuple[List[int], str]]): Only use it in inference for uneven shardinglist of tuples of (sizes, placement); sizes is the row size list
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_1": row_wise(),
"table_2": row_wise([10, 5, 0, 3], "cpu")
},
)
"""
def _parameter_sharding_generator(
param: nn.Parameter,
local_size: int,
world_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
) -> ParameterSharding:
if sizes_placement is None:
size_and_offsets = _get_parameter_size_offsets(
param,
ShardingType.ROW_WISE,
local_size,
world_size,
)
assert len(size_and_offsets) <= world_size
size_offset_ranks = []
for (size, offset), rank in zip(size_and_offsets, range(world_size)):
size_offset_ranks.append((size, offset, rank))
else:
size_offset_ranks = []
sizes = sizes_placement[0]
(rows, cols) = param.shape
cur_offset = 0
prev_offset = 0
for rank, size in enumerate(sizes):
per_rank_row = size
cur_offset += per_rank_row
cur_offset = min(cur_offset, rows)
per_rank_row = cur_offset - prev_offset
size_offset_ranks.append(([per_rank_row, cols], [prev_offset, 0], rank))
prev_offset = cur_offset
if cur_offset < rows:
raise ValueError(
f"Cannot fit tensor of {rows, cols} into sizes_ranks_placements = {sizes_placement}"
)
def placement_helper(device_type: str, index: int = 0) -> str:
if device_type == "cpu":
return f"rank:0/{device_type}" # cpu only use rank 0
return f"rank:{index}/{device_type}:{index}"
return _get_parameter_sharding(
param,
ShardingType.ROW_WISE.value,
size_offset_ranks,
local_size,
device_type,
sharder,
placements=(
[
placement_helper(sizes_placement[1], i)
for i in range(len(sizes_placement[0]))
]
if sizes_placement
else None
),
compute_kernel=(
EmbeddingComputeKernel.QUANT.value if sizes_placement else None
),
)
return _parameter_sharding_generator | Returns a generator of ParameterShardingPlan for `ShardingType::ROW_WISE` for construct_module_sharding_plan. Args: sizes_placement (Optional[Tuple[List[int], str]]): Only use it in inference for uneven shardinglist of tuples of (sizes, placement); sizes is the row size list Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_1": row_wise(), "table_2": row_wise([10, 5, 0, 3], "cpu") }, ) |
9,023 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def _get_parameter_size_offsets(
param: torch.nn.Parameter,
sharding_type: ShardingType,
local_size: int,
world_size: int,
col_wise_shard_dim: Optional[int] = None,
) -> List[Tuple[List[int], List[int]]]:
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=none_throws(param),
world_size=world_size,
local_world_size=local_size,
sharding_type=sharding_type.value,
col_wise_shard_dim=col_wise_shard_dim,
)
return list(zip(shard_sizes, shard_offsets))
def _get_parameter_sharding(
param: nn.Parameter,
sharding_type: str,
size_offset_ranks: List[Tuple[List[int], List[int], int]],
local_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
placements: Optional[List[str]] = None,
compute_kernel: Optional[str] = None,
) -> ParameterSharding:
return ParameterSharding(
sharding_spec=(
None
if sharding_type == ShardingType.DATA_PARALLEL.value
else EnumerableShardingSpec(
[
ShardMetadata(
shard_sizes=size,
shard_offsets=offset,
placement=(
placement(
device_type,
none_throws(rank),
none_throws(local_size),
)
if not device_placement
else device_placement
),
)
for (size, offset, rank), device_placement in zip(
size_offset_ranks,
placements if placements else [None] * len(size_offset_ranks),
)
]
)
),
sharding_type=sharding_type,
compute_kernel=(
compute_kernel
if compute_kernel
else _get_compute_kernel(sharder, param, sharding_type, device_type)
),
ranks=[rank for (_, _, rank) in size_offset_ranks],
)
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
"""
Describes the sharding of the parameter.
sharding_type (str): how this parameter is sharded. See ShardingType for well-known
types.
compute_kernel (str): compute kernel to be used by this parameter.
ranks (Optional[List[int]]): rank of each shard.
sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
cache_params (Optional[CacheParams]): cache params for embedding lookup.
enforce_hbm (Optional[bool]): whether to use HBM.
stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.
NOTE:
ShardingType.TABLE_WISE - rank where this embedding is placed
ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
individual tables
ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
"""
sharding_type: str
compute_kernel: str
ranks: Optional[List[int]] = None
sharding_spec: Optional[ShardingSpec] = None
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `column_wise` function. Write a Python function `def column_wise( ranks: List[int], ) -> ParameterShardingGenerator` to solve the following problem:
Returns a generator of ParameterShardingPlan for `ShardingType::COLUMN_WISE` for construct_module_sharding_plan. Table will the sharded column-wise evenly across specified ranks (and can reuse ranks). Args: ranks (List[int]): ranks to place columns Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_3": column_wise(ranks=[0,1,2]), }, )
Here is the function:
def column_wise(
ranks: List[int],
) -> ParameterShardingGenerator:
"""
Returns a generator of ParameterShardingPlan for `ShardingType::COLUMN_WISE` for construct_module_sharding_plan.
Table will the sharded column-wise evenly across specified ranks (and can reuse ranks).
Args:
ranks (List[int]): ranks to place columns
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_3": column_wise(ranks=[0,1,2]),
},
)
"""
def _parameter_sharding_generator(
param: nn.Parameter,
local_size: int,
world_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
) -> ParameterSharding:
if param.shape[1] % len(ranks) != 0:
raise ValueError(
f"column dim of {param.shape[1]} cannot be evenly divided across {ranks}"
)
shard_dim = param.shape[1] // len(ranks)
size_and_offsets = _get_parameter_size_offsets(
param,
ShardingType.COLUMN_WISE,
local_size,
world_size,
col_wise_shard_dim=shard_dim,
)
size_offset_ranks = []
for (size, offset), rank in zip(size_and_offsets, ranks):
size_offset_ranks.append((size, offset, rank))
return _get_parameter_sharding(
param,
ShardingType.COLUMN_WISE.value,
size_offset_ranks,
local_size,
device_type,
sharder,
)
return _parameter_sharding_generator | Returns a generator of ParameterShardingPlan for `ShardingType::COLUMN_WISE` for construct_module_sharding_plan. Table will the sharded column-wise evenly across specified ranks (and can reuse ranks). Args: ranks (List[int]): ranks to place columns Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_3": column_wise(ranks=[0,1,2]), }, ) |
9,024 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def _get_parameter_size_offsets(
param: torch.nn.Parameter,
sharding_type: ShardingType,
local_size: int,
world_size: int,
col_wise_shard_dim: Optional[int] = None,
) -> List[Tuple[List[int], List[int]]]:
(
shard_sizes,
shard_offsets,
) = calculate_shard_sizes_and_offsets(
tensor=none_throws(param),
world_size=world_size,
local_world_size=local_size,
sharding_type=sharding_type.value,
col_wise_shard_dim=col_wise_shard_dim,
)
return list(zip(shard_sizes, shard_offsets))
def _get_parameter_sharding(
param: nn.Parameter,
sharding_type: str,
size_offset_ranks: List[Tuple[List[int], List[int], int]],
local_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
placements: Optional[List[str]] = None,
compute_kernel: Optional[str] = None,
) -> ParameterSharding:
return ParameterSharding(
sharding_spec=(
None
if sharding_type == ShardingType.DATA_PARALLEL.value
else EnumerableShardingSpec(
[
ShardMetadata(
shard_sizes=size,
shard_offsets=offset,
placement=(
placement(
device_type,
none_throws(rank),
none_throws(local_size),
)
if not device_placement
else device_placement
),
)
for (size, offset, rank), device_placement in zip(
size_offset_ranks,
placements if placements else [None] * len(size_offset_ranks),
)
]
)
),
sharding_type=sharding_type,
compute_kernel=(
compute_kernel
if compute_kernel
else _get_compute_kernel(sharder, param, sharding_type, device_type)
),
ranks=[rank for (_, _, rank) in size_offset_ranks],
)
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
"""
Describes the sharding of the parameter.
sharding_type (str): how this parameter is sharded. See ShardingType for well-known
types.
compute_kernel (str): compute kernel to be used by this parameter.
ranks (Optional[List[int]]): rank of each shard.
sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
cache_params (Optional[CacheParams]): cache params for embedding lookup.
enforce_hbm (Optional[bool]): whether to use HBM.
stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.
NOTE:
ShardingType.TABLE_WISE - rank where this embedding is placed
ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
individual tables
ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
"""
sharding_type: str
compute_kernel: str
ranks: Optional[List[int]] = None
sharding_spec: Optional[ShardingSpec] = None
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `table_row_wise` function. Write a Python function `def table_row_wise( host_index: int, ) -> ParameterShardingGenerator` to solve the following problem:
Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_ROW_WISE` for construct_module_sharding_plan. Args: host_index (int): index of host (node) to do row wise Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_4": table_row_wise(host_index=2), }, )
Here is the function:
def table_row_wise(
host_index: int,
) -> ParameterShardingGenerator:
"""
Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_ROW_WISE` for construct_module_sharding_plan.
Args:
host_index (int): index of host (node) to do row wise
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_4": table_row_wise(host_index=2),
},
)
"""
def _parameter_sharding_generator(
param: nn.Parameter,
local_size: int,
world_size: int,
device_type: str,
sharder: ModuleSharder[nn.Module],
) -> ParameterSharding:
size_and_offsets = _get_parameter_size_offsets(
param,
ShardingType.TABLE_ROW_WISE,
local_size,
world_size,
)
size_offset_ranks = []
assert len(size_and_offsets) <= local_size
for (size, offset), rank in zip(size_and_offsets, range(local_size)):
rank_offset = host_index * local_size
size_offset_ranks.append((size, offset, rank_offset + rank))
return _get_parameter_sharding(
param,
ShardingType.TABLE_ROW_WISE.value,
size_offset_ranks,
local_size,
device_type,
sharder,
)
return _parameter_sharding_generator | Returns a generator of ParameterShardingPlan for `ShardingType::TABLE_ROW_WISE` for construct_module_sharding_plan. Args: host_index (int): index of host (node) to do row wise Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_4": table_row_wise(host_index=2), }, ) |
9,025 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def get_module_to_default_sharders() -> Dict[Type[nn.Module], ModuleSharder[nn.Module]]:
return {sharder.module_type: sharder for sharder in get_default_sharders()}
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `apply_to_all` function. Write a Python function `def apply_to_all( module: nn.Module, parameter_sharding_generator: ParameterShardingGenerator, sharder: Optional[ModuleSharder[nn.Module]] = None, ) -> Dict[str, ParameterShardingGenerator]` to solve the following problem:
Convenience function to apply a sharding scheme generator for all modules in construct_module_sharding_plan. Example:: ebc = EmbeddingBagCollection(...) sharder = EmbeddingBagCollectionSharder() plan = construct_parameter_sharding_plan( ebc, apply_to_all(ebc, row_wise(), sharder), )
Here is the function:
def apply_to_all(
module: nn.Module,
parameter_sharding_generator: ParameterShardingGenerator,
sharder: Optional[ModuleSharder[nn.Module]] = None,
) -> Dict[str, ParameterShardingGenerator]:
"""
Convenience function to apply a sharding scheme generator for all modules in construct_module_sharding_plan.
Example::
ebc = EmbeddingBagCollection(...)
sharder = EmbeddingBagCollectionSharder()
plan = construct_parameter_sharding_plan(
ebc,
apply_to_all(ebc, row_wise(), sharder),
)
"""
if sharder is None:
sharder = get_module_to_default_sharders().get(type(module), None)
else:
assert isinstance(
module, sharder.module_type
), f"Incorrect sharder for module type {type(module)}"
assert (
sharder is not None
), f"Could not find a valid sharder type for {type(module)}"
shardable_parameters = sharder.shardable_parameters(module)
return {
param_name: parameter_sharding_generator for param_name in shardable_parameters
} | Convenience function to apply a sharding scheme generator for all modules in construct_module_sharding_plan. Example:: ebc = EmbeddingBagCollection(...) sharder = EmbeddingBagCollectionSharder() plan = construct_parameter_sharding_plan( ebc, apply_to_all(ebc, row_wise(), sharder), ) |
9,026 | import math
import warnings
from typing import Callable, cast, Dict, List, Optional, Tuple, Type
import torch
from torch import distributed as dist, nn
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.fp_embeddingbag import (
FeatureProcessedEmbeddingBagCollectionSharder,
)
from torchrec.distributed.fused_embeddingbag import FusedEmbeddingBagCollectionSharder
from torchrec.distributed.mc_embedding import ManagedCollisionEmbeddingCollectionSharder
from torchrec.distributed.mc_embeddingbag import (
ManagedCollisionEmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.constants import MIN_CW_DIM
from torchrec.distributed.quant_embedding import QuantEmbeddingCollectionSharder
from torchrec.distributed.quant_embeddingbag import QuantEmbeddingBagCollectionSharder
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingType,
ShardMetadata,
)
from torchrec.distributed.utils import none_throws
def get_module_to_default_sharders() -> Dict[Type[nn.Module], ModuleSharder[nn.Module]]:
return {sharder.module_type: sharder for sharder in get_default_sharders()}
ParameterShardingGenerator = Callable[
[
nn.Parameter,
int,
int,
str,
ModuleSharder[nn.Module],
],
ParameterSharding,
]
def get_local_size(world_size: Optional[int] = None) -> int:
if world_size is None:
world_size = dist.get_world_size()
"""
Gets the local world size (see https://pytorch.org/docs/stable/elastic/run.html)
This is usually the size of workers on each node, or nproc_per_node
"""
local_size = _env2int(
[
"LOCAL_WORLD_SIZE",
"MPI_LOCALNRANKS",
"OMPI_COMM_WORLD_LOCAL_SIZE",
"MV2_COMM_WORLD_LOCAL_SIZE",
],
8,
)
if local_size == -1 or world_size % local_size != 0:
logging.warning(
"Could not determine LOCAL_WORLD_SIZE from environment, falling back to WORLD_SIZE."
)
local_size = world_size
return local_size
class EmbeddingModuleShardingPlan(ModuleShardingPlan, Dict[str, ParameterSharding]):
"""
Map of ParameterSharding per parameter (usually a table). This describes the sharding plan for a torchrec module (e.g. `EmbeddingBagCollection`)
"""
def __str__(self) -> str:
out = ""
param_table = []
shard_table = []
for param_name, param_sharding in self.items():
param_table.append(
[
param_name,
param_sharding.sharding_type,
param_sharding.compute_kernel,
param_sharding.ranks,
]
)
if isinstance(param_sharding.sharding_spec, EnumerableShardingSpec):
shards = param_sharding.sharding_spec.shards
if shards is not None:
for shard in shards:
shard_table.append(
[
param_name,
shard.shard_offsets,
shard.shard_sizes,
shard.placement,
]
)
out += "\n\n" + _tabulate(
param_table, ["param", "sharding type", "compute kernel", "ranks"]
)
out += "\n\n" + _tabulate(
shard_table, ["param", "shard offsets", "shard sizes", "placement"]
)
return out
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `construct_module_sharding_plan` function. Write a Python function `def construct_module_sharding_plan( module: nn.Module, per_param_sharding: Dict[str, ParameterShardingGenerator], sharder: Optional[ModuleSharder[nn.Module]] = None, local_size: Optional[int] = None, world_size: Optional[int] = None, device_type: Optional[str] = None, ) -> EmbeddingModuleShardingPlan` to solve the following problem:
Helper function to create module sharding plans (EmbeddingModuleShardingPlan) for an module Args: module (nn.Module): module to create plan for. per_param_sharding: Dict[str, Callable[[nn.Parameter, int, int, str], ParameterSharding]]: A mapping of parameter names to a generator function that takes in [parameter, local_size, world_size, device_type] and returns a ParameterSharding. We recommend using one of the predefined generator functions e.g. table_wise_sharding, row_wise_sharding, etc, sharder: Optional[ModuleSharder[nn.Module]]: Sharder that we are creating a plan for. If set to none, we will try to derive it from the module. We recommend setting this to None. local_size: Optional[int] = None: process group local size world_size: Optional[int] = None: process_group world_size device_type: str : Torch device type, Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": data_parallel(), "table_1": row_wise(), "table_2": column_wise(), "table_3": column_wise(ranks=[0,1,2]), "table_4": table_row_wise(host_index=2), }, )
Here is the function:
def construct_module_sharding_plan(
module: nn.Module,
per_param_sharding: Dict[str, ParameterShardingGenerator],
sharder: Optional[ModuleSharder[nn.Module]] = None,
local_size: Optional[int] = None,
world_size: Optional[int] = None,
device_type: Optional[str] = None,
) -> EmbeddingModuleShardingPlan:
"""
Helper function to create module sharding plans (EmbeddingModuleShardingPlan) for an module
Args:
module (nn.Module): module to create plan for.
per_param_sharding: Dict[str, Callable[[nn.Parameter, int, int, str], ParameterSharding]]: A mapping of parameter names to a generator function
that takes in [parameter, local_size, world_size, device_type] and returns a ParameterSharding. We recommend using one of the predefined generator functions
e.g. table_wise_sharding, row_wise_sharding, etc,
sharder: Optional[ModuleSharder[nn.Module]]: Sharder that we are creating a plan for. If set to none, we will try to derive it from the module. We recommend setting this to None.
local_size: Optional[int] = None: process group local size
world_size: Optional[int] = None: process_group world_size
device_type: str : Torch device type,
Example::
ebc = EmbeddingBagCollection(...)
plan = construct_module_sharding_plan(
ebc,
{
"table_0": data_parallel(),
"table_1": row_wise(),
"table_2": column_wise(),
"table_3": column_wise(ranks=[0,1,2]),
"table_4": table_row_wise(host_index=2),
},
)
"""
if device_type is None:
device_type = "cuda" if torch.cuda.is_available() else "cpu"
if sharder is None:
sharder = get_module_to_default_sharders().get(type(module), None)
assert (
sharder is not None
), f"Could not find a valid sharder type for {type(module)}"
assert isinstance(
module, sharder.module_type
), f"Incorrect sharder for module type {type(module)}"
shardable_parameters = sharder.shardable_parameters(module)
assert (
shardable_parameters.keys() == per_param_sharding.keys()
), "per_param_sharding_config doesn't match the shardable parameters of the module"
local_size = local_size or get_local_size()
world_size = world_size or dist.get_world_size()
per_parameter_sharding = EmbeddingModuleShardingPlan()
for table_name, sharding_plan_generator in per_param_sharding.items():
param = shardable_parameters[table_name]
per_parameter_sharding[table_name] = sharding_plan_generator(
param, local_size, world_size, device_type, sharder
)
return per_parameter_sharding | Helper function to create module sharding plans (EmbeddingModuleShardingPlan) for an module Args: module (nn.Module): module to create plan for. per_param_sharding: Dict[str, Callable[[nn.Parameter, int, int, str], ParameterSharding]]: A mapping of parameter names to a generator function that takes in [parameter, local_size, world_size, device_type] and returns a ParameterSharding. We recommend using one of the predefined generator functions e.g. table_wise_sharding, row_wise_sharding, etc, sharder: Optional[ModuleSharder[nn.Module]]: Sharder that we are creating a plan for. If set to none, we will try to derive it from the module. We recommend setting this to None. local_size: Optional[int] = None: process group local size world_size: Optional[int] = None: process_group world_size device_type: str : Torch device type, Example:: ebc = EmbeddingBagCollection(...) plan = construct_module_sharding_plan( ebc, { "table_0": data_parallel(), "table_1": row_wise(), "table_2": column_wise(), "table_3": column_wise(ranks=[0,1,2]), "table_4": table_row_wise(host_index=2), }, ) |
9,027 | import abc
import copy
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Set, Tuple, Type
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.fsdp import FullyShardedDataParallel
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
from torchrec.distributed.sharding_plan import get_default_sharders
from torchrec.distributed.types import (
ModuleSharder,
ShardedModule,
ShardingEnv,
ShardingPlan,
)
from torchrec.distributed.utils import (
add_prefix_to_state_dict,
append_prefix,
copy_to_device,
filter_state_dict,
sharded_model_copy,
)
from torchrec.optim.fused import FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
class DistributedModelParallel(nn.Module, FusedOptimizerModule):
"""
Entry point to model parallelism.
Args:
module (nn.Module): module to wrap.
env (Optional[ShardingEnv]): sharding environment that has the process group.
device (Optional[torch.device]): compute device, defaults to cpu.
plan (Optional[ShardingPlan]): plan to use when sharding, defaults to
`EmbeddingShardingPlanner.collective_plan()`.
sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available
to shard with, defaults to `EmbeddingBagCollectionSharder()`.
init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay
parameter initialization until the first forward pass. Pass `True` to delay
initialization of data parallel modules. Do first forward pass and then call
DistributedModelParallel.init_data_parallel().
init_parameters (bool): initialize parameters for modules still on meta device.
data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data
parallel modules.
Example::
def init_weights(m):
if isinstance(m, nn.Linear):
m.weight.fill_(1.0)
elif isinstance(m, EmbeddingBagCollection):
for param in m.parameters():
init.kaiming_normal_(param)
m = MyModel(device='meta')
m = DistributedModelParallel(m)
m.apply(init_weights)
"""
def __init__(
self,
module: nn.Module,
env: Optional[ShardingEnv] = None,
device: Optional[torch.device] = None,
plan: Optional[ShardingPlan] = None,
sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
init_data_parallel: bool = True,
init_parameters: bool = True,
data_parallel_wrapper: Optional[DataParallelWrapper] = None,
) -> None:
super().__init__()
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self.init_parameters = init_parameters
self._ddp_wrapped: bool = False
if env is None:
pg = dist.GroupMember.WORLD
assert pg is not None, "Process group is not initialized"
env = ShardingEnv.from_process_group(pg)
self._env: ShardingEnv = env
if device is None:
device = torch.device("cpu")
self.device: torch.device = device
if sharders is None:
sharders = get_default_sharders()
self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = {
sharder.module_type: sharder for sharder in sharders
}
if data_parallel_wrapper is None:
data_parallel_wrapper = DefaultDataParallelWrapper()
self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper
if plan is None:
planner = EmbeddingShardingPlanner(
topology=Topology(
local_world_size=get_local_size(self._env.world_size),
world_size=self._env.world_size,
compute_device=self.device.type,
)
)
pg = self._env.process_group
if pg is not None:
plan = planner.collective_plan(module, sharders, pg)
else:
plan = planner.plan(module, sharders)
self._plan: ShardingPlan = plan
self._dmp_wrapped_module: nn.Module = self._init_dmp(module)
self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module)
if init_parameters:
self._init_parameters(self.module)
if init_data_parallel:
self.init_data_parallel()
def module(self) -> nn.Module:
"""
Property to directly access sharded module, which will not be wrapped in DDP,
FSDP, DMP, or any other parallelism wrappers.
"""
return get_unwrapped_module(self)
def module(self, value: nn.Module) -> None:
if isinstance(self.module, DistributedDataParallel) or isinstance(
self.module, FullyShardedDataParallel
):
raise RuntimeError(
"module can't be set after calling init_data_parallel(...)"
)
else:
self._dmp_wrapped_module = value
# pyre-ignore [2, 3]
def forward(self, *args, **kwargs) -> Any:
return self._dmp_wrapped_module(*args, **kwargs)
def init_data_parallel(self) -> None:
"""
See init_data_parallel c-tor argument for usage.
It's safe to call this method multiple times.
"""
if not self._ddp_wrapped:
# Allocate any 'meta' tensors
if self.init_parameters:
self._init_parameters(self._dmp_wrapped_module)
self._data_parallel_wrapper.wrap(self, self._env, self.device)
self._ddp_wrapped = True
def copy(
self,
device: torch.device,
) -> "DistributedModelParallel":
"""
Recursively copy submodules to new device by calling per-module customized copy
process, since some modules needs to use the original references (like
`ShardedModule` for inference).
"""
assert isinstance(device, torch.device)
# dmp code deep copy
with sharded_model_copy(device=None):
copy_dmp = copy.deepcopy(self)
# tensor resident module deep copy
copy_dmp_wrapped_module = copy_to_device(
self._dmp_wrapped_module, self.device, device
)
copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module
return copy_dmp
def _init_dmp(self, module: nn.Module) -> nn.Module:
return self._shard_modules_impl(module)
def _init_optim(self, module: nn.Module) -> CombinedOptimizer:
# pyre-ignore [6]
return CombinedOptimizer(self._fused_optim_impl(module, []))
def _fused_optim_impl(
self,
module: nn.Module,
fused_optims: List[Tuple[str, KeyedOptimizer]],
path: str = "",
) -> List[Tuple[str, KeyedOptimizer]]:
if isinstance(module, FusedOptimizerModule):
fused_optims.append((path, module.fused_optimizer))
return fused_optims
for name, child in module.named_children():
self._fused_optim_impl(
child,
fused_optims,
path + "." + name if path else name,
)
return fused_optims
def _shard_modules_impl(
self,
module: nn.Module,
path: str = "",
) -> nn.Module:
# pre-sharded module
if isinstance(module, ShardedModule):
return module
# shardable module
module_sharding_plan = self._plan.get_plan_for_module(path)
if module_sharding_plan:
sharder_key = type(module)
module = self._sharder_map[sharder_key].shard(
module,
module_sharding_plan,
self._env,
self.device,
)
return module
for name, child in module.named_children():
child = self._shard_modules_impl(
child,
path + "." + name if path else name,
)
setattr(module, name, child)
return module
def _init_parameters(self, module: nn.Module) -> None:
def init_parameters(module: nn.Module) -> None:
# Allocate parameters and buffers if over 'meta' device.
has_meta_param = False
for name, param in module._parameters.items():
if isinstance(param, torch.Tensor) and param.device.type == "meta":
module._parameters[name] = nn.Parameter(
torch.empty_like(param, device=self.device),
requires_grad=param.requires_grad,
)
has_meta_param = True
for name, buffer in module._buffers.items():
if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta":
module._buffers[name] = torch.zeros_like(buffer, device=self.device)
# Init parameters if at least one parameter is over 'meta' device.
if has_meta_param and hasattr(module, "reset_parameters"):
module.reset_parameters()
module.apply(init_parameters)
def sparse_grad_parameter_names(
self, destination: Optional[List[str]] = None, prefix: str = ""
) -> List[str]:
destination = [] if destination is None else destination
return self._sparse_grad_parameter_names(self.module, destination, prefix)
def _sparse_grad_parameter_names(
self, module: nn.Module, destination: List[str], prefix: str = ""
) -> List[str]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
pass
elif isinstance(module, nn.Embedding):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
elif isinstance(module, nn.EmbeddingBag):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
else:
for name, child in module.named_children():
self._sparse_grad_parameter_names(
child, destination, append_prefix(prefix, name)
)
return destination
# pyre-ignore [14]
def state_dict(
self,
destination: Optional[Dict[str, Any]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, Any]:
state_dict = get_module(self).state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, prefix + _DDP_STATE_DICT_PREFIX
)
add_prefix_to_state_dict(state_dict, prefix)
return state_dict
# pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`
# inconsistently.
def load_state_dict(
self,
state_dict: "OrderedDict[str, torch.Tensor]",
prefix: str = "",
strict: bool = True,
) -> _IncompatibleKeys:
return self._load_state_dict(self, state_dict, prefix, strict)
def _load_state_dict(
self,
module: nn.Module,
state_dict: "OrderedDict[str, torch.Tensor]",
prefix: str = "",
strict: bool = True,
) -> _IncompatibleKeys:
missing_keys = []
unexpected_keys = []
module = get_module(module)
if isinstance(module, DistributedDataParallel):
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict, prefix
)
add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
if isinstance(module, ShardedModule):
return module.load_state_dict(state_dict, strict=strict)
else:
module._load_from_state_dict(
state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
)
for name, child in module.named_children():
m_keys, u_keys = self._load_state_dict(
child,
filter_state_dict(state_dict, prefix + name),
"",
strict,
)
missing_keys.extend(m_keys)
unexpected_keys.extend(u_keys)
return _IncompatibleKeys(
missing_keys=missing_keys, unexpected_keys=unexpected_keys
)
def _named_parameters(
self,
module: nn.Module,
prefix: str = "",
recurse: bool = True,
strip_ddp: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
if strip_ddp:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
yield from module.named_parameters(prefix, recurse)
else:
yield from module.named_parameters(prefix, recurse=False)
for name, child in module.named_children():
yield from self._named_parameters(
child,
append_prefix(prefix, name),
recurse,
strip_ddp,
)
def named_parameters(
self,
prefix: str = "",
recurse: bool = True,
remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
gen = self._named_parameters(
self.module,
prefix,
recurse,
)
memo = set()
for key, param in gen:
if param in memo:
continue
if remove_duplicate:
memo.add(param)
yield key, param
def bare_named_parameters(
self,
prefix: str = "",
recurse: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
gen = self._named_parameters(
self.module,
prefix,
recurse,
)
memo = set()
for key, param in gen:
if param in memo:
continue
memo.add(param)
yield key, param
def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
yield from module.sharded_parameter_names(prefix)
else:
for name, child in module.named_children():
yield from DistributedModelParallel._sharded_parameter_names(
child, append_prefix(prefix, name)
)
def _named_buffers(
self, module: nn.Module, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
yield from module.named_buffers(prefix, recurse)
else:
yield from module.named_buffers(prefix, recurse=False)
for name, child in module.named_children():
yield from self._named_buffers(
child, append_prefix(prefix, name), recurse
)
def named_buffers(
self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
gen = self._named_buffers(self.module, prefix, recurse)
memo = set()
for key, param in gen:
if param in memo:
continue
if remove_duplicate:
memo.add(param)
yield key, param
def fused_optimizer(self) -> KeyedOptimizer:
return self._optim
def plan(self) -> ShardingPlan:
return self._plan
def _reset_parameters(module: nn.Module) -> None:
for _, m in module.named_modules():
if hasattr(m, "reset_parameters"):
m.reset_parameters()
The provided code snippet includes necessary dependencies for implementing the `get_unwrapped_module` function. Write a Python function `def get_unwrapped_module(module: nn.Module) -> nn.Module` to solve the following problem:
Unwraps module wrapped by DMP, DDP, or FSDP.
Here is the function:
def get_unwrapped_module(module: nn.Module) -> nn.Module:
    """
    Unwraps module wrapped by DMP, DDP, or FSDP.
    """
    # Each wrapper stores its inner module under a different attribute; peel
    # wrappers one layer at a time until a plain module remains.
    unwrap_attrs = (
        (DistributedModelParallel, "_dmp_wrapped_module"),
        (FullyShardedDataParallel, "_fsdp_wrapped_module"),
        (DistributedDataParallel, "module"),
    )
    while True:
        for wrapper_cls, attr in unwrap_attrs:
            if isinstance(module, wrapper_cls):
                module = getattr(module, attr)
                break
        else:
            return module
import abc
import copy
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Set, Tuple, Type
import torch
import torch.distributed as dist
from torch import nn
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as ddp_default_hooks,
)
from torch.distributed.fsdp import FullyShardedDataParallel
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
from torchrec.distributed.sharding_plan import get_default_sharders
from torchrec.distributed.types import (
ModuleSharder,
ShardedModule,
ShardingEnv,
ShardingPlan,
)
from torchrec.distributed.utils import (
add_prefix_to_state_dict,
append_prefix,
copy_to_device,
filter_state_dict,
sharded_model_copy,
)
from torchrec.optim.fused import FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
class DistributedModelParallel(nn.Module, FusedOptimizerModule):
    # NOTE(review): in this extract the method bodies appear below, apparently
    # de-indented to module level and with decorators (@property, @staticmethod)
    # stripped; confirm the structure against the original source file.
    """
    Entry point to model parallelism.

    Args:
        module (nn.Module): module to wrap.
        env (Optional[ShardingEnv]): sharding environment that has the process group.
        device (Optional[torch.device]): compute device, defaults to cpu.
        plan (Optional[ShardingPlan]): plan to use when sharding, defaults to
            `EmbeddingShardingPlanner.collective_plan()`.
        sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available
            to shard with, defaults to `EmbeddingBagCollectionSharder()`.
        init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay
            parameter initialization until the first forward pass. Pass `True` to delay
            initialization of data parallel modules. Do first forward pass and then call
            DistributedModelParallel.init_data_parallel().
        init_parameters (bool): initialize parameters for modules still on meta device.
        data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data
            parallel modules.

    Example::

        def init_weights(m):
            if isinstance(m, nn.Linear):
                m.weight.fill_(1.0)
            elif isinstance(m, EmbeddingBagCollection):
                for param in m.parameters():
                    init.kaiming_normal_(param)

        m = MyModel(device='meta')
        m = DistributedModelParallel(m)
        m.apply(init_weights)
    """
def __init__(
    self,
    module: nn.Module,
    env: Optional[ShardingEnv] = None,
    device: Optional[torch.device] = None,
    plan: Optional[ShardingPlan] = None,
    sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
    init_data_parallel: bool = True,
    init_parameters: bool = True,
    data_parallel_wrapper: Optional[DataParallelWrapper] = None,
) -> None:
    """Resolve defaults, plan (if needed), shard the module, and collect optimizers."""
    super().__init__()
    torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
    self.init_parameters = init_parameters
    self._ddp_wrapped: bool = False
    # Default to the global (WORLD) process group when no env is supplied.
    if env is None:
        pg = dist.GroupMember.WORLD
        assert pg is not None, "Process group is not initialized"
        env = ShardingEnv.from_process_group(pg)
    self._env: ShardingEnv = env
    if device is None:
        device = torch.device("cpu")
    self.device: torch.device = device
    if sharders is None:
        sharders = get_default_sharders()
    # Map module type -> sharder so _shard_modules_impl can dispatch by type.
    self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = {
        sharder.module_type: sharder for sharder in sharders
    }
    if data_parallel_wrapper is None:
        data_parallel_wrapper = DefaultDataParallelWrapper()
    self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper
    # Generate a plan when none was given; collective_plan synchronizes the
    # plan across ranks when a process group is available.
    if plan is None:
        planner = EmbeddingShardingPlanner(
            topology=Topology(
                local_world_size=get_local_size(self._env.world_size),
                world_size=self._env.world_size,
                compute_device=self.device.type,
            )
        )
        pg = self._env.process_group
        if pg is not None:
            plan = planner.collective_plan(module, sharders, pg)
        else:
            plan = planner.plan(module, sharders)
    self._plan: ShardingPlan = plan
    self._dmp_wrapped_module: nn.Module = self._init_dmp(module)
    self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module)
    if init_parameters:
        self._init_parameters(self.module)
    if init_data_parallel:
        self.init_data_parallel()
def module(self) -> nn.Module:
    # NOTE(review): presumably decorated with @property in the original source.
    """
    Property to directly access sharded module, which will not be wrapped in DDP,
    FSDP, DMP, or any other parallelism wrappers.
    """
    return get_unwrapped_module(self)
def module(self, value: nn.Module) -> None:
    # NOTE(review): presumably the @module.setter in the original source.
    # Replacement is disallowed once the module has been DDP/FSDP wrapped,
    # because the wrapper would keep referencing the old module.
    if isinstance(self.module, DistributedDataParallel) or isinstance(
        self.module, FullyShardedDataParallel
    ):
        raise RuntimeError(
            "module can't be set after calling init_data_parallel(...)"
        )
    else:
        self._dmp_wrapped_module = value
# pyre-ignore [2, 3]
def forward(self, *args, **kwargs) -> Any:
    # Delegate straight to the (possibly DDP/FSDP-wrapped) sharded module.
    return self._dmp_wrapped_module(*args, **kwargs)
def init_data_parallel(self) -> None:
    """
    See init_data_parallel c-tor argument for usage.
    It's safe to call this method multiple times.
    """
    if self._ddp_wrapped:
        # Already wrapped; calling again is a no-op (idempotent).
        return
    # Allocate any 'meta' tensors before handing off to the DP wrapper.
    if self.init_parameters:
        self._init_parameters(self._dmp_wrapped_module)
    self._data_parallel_wrapper.wrap(self, self._env, self.device)
    self._ddp_wrapped = True
def copy(
    self,
    device: torch.device,
) -> "DistributedModelParallel":
    """
    Recursively copy submodules to a new device by calling each module's
    customized copy process, since some modules need to keep the original
    references (like `ShardedModule` for inference).
    """
    assert isinstance(device, torch.device)
    # dmp code deep copy: duplicate the DMP shell without moving tensors yet.
    with sharded_model_copy(device=None):
        copy_dmp = copy.deepcopy(self)
    # tensor resident module deep copy: move the wrapped module's tensors.
    copy_dmp_wrapped_module = copy_to_device(
        self._dmp_wrapped_module, self.device, device
    )
    copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module
    return copy_dmp
def _init_dmp(self, module: nn.Module) -> nn.Module:
    # Shard the wrapped module tree according to self._plan.
    return self._shard_modules_impl(module)
def _init_optim(self, module: nn.Module) -> CombinedOptimizer:
    # Gather fused optimizers from all sharded submodules into a single one.
    # pyre-ignore [6]
    return CombinedOptimizer(self._fused_optim_impl(module, []))
def _fused_optim_impl(
    self,
    module: nn.Module,
    fused_optims: List[Tuple[str, KeyedOptimizer]],
    path: str = "",
) -> List[Tuple[str, KeyedOptimizer]]:
    """Collect (path, fused_optimizer) pairs from the module tree, depth-first."""
    if isinstance(module, FusedOptimizerModule):
        # A fused-optimizer module terminates the walk on this branch.
        fused_optims.append((path, module.fused_optimizer))
    else:
        for child_name, child in module.named_children():
            child_path = f"{path}.{child_name}" if path else child_name
            self._fused_optim_impl(child, fused_optims, child_path)
    return fused_optims
def _shard_modules_impl(
    self,
    module: nn.Module,
    path: str = "",
) -> nn.Module:
    """Recursively replace shardable submodules with their sharded versions."""
    # pre-sharded module
    if isinstance(module, ShardedModule):
        return module
    # shardable module: the plan keys sharding decisions by dotted module path.
    module_sharding_plan = self._plan.get_plan_for_module(path)
    if module_sharding_plan:
        sharder_key = type(module)
        module = self._sharder_map[sharder_key].shard(
            module,
            module_sharding_plan,
            self._env,
            self.device,
        )
        return module
    # Otherwise recurse and re-attach (possibly replaced) children in place.
    for name, child in module.named_children():
        child = self._shard_modules_impl(
            child,
            path + "." + name if path else name,
        )
        setattr(module, name, child)
    return module
def _init_parameters(self, module: nn.Module) -> None:
    """Materialize any 'meta'-device parameters/buffers onto ``self.device``."""

    def init_parameters(module: nn.Module) -> None:
        # Allocate parameters and buffers if over 'meta' device.
        has_meta_param = False
        for name, param in module._parameters.items():
            if isinstance(param, torch.Tensor) and param.device.type == "meta":
                module._parameters[name] = nn.Parameter(
                    torch.empty_like(param, device=self.device),
                    requires_grad=param.requires_grad,
                )
                has_meta_param = True
        for name, buffer in module._buffers.items():
            if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta":
                module._buffers[name] = torch.zeros_like(buffer, device=self.device)
        # Init parameters if at least one parameter is over 'meta' device.
        if has_meta_param and hasattr(module, "reset_parameters"):
            module.reset_parameters()

    module.apply(init_parameters)
def sparse_grad_parameter_names(
    self, destination: Optional[List[str]] = None, prefix: str = ""
) -> List[str]:
    """Return names of parameters that receive sparse gradients."""
    if destination is None:
        destination = []
    return self._sparse_grad_parameter_names(self.module, destination, prefix)
def _sparse_grad_parameter_names(
    self, module: nn.Module, destination: List[str], prefix: str = ""
) -> List[str]:
    """Accumulate names of sparse-gradient parameters into ``destination``.

    Walks the module tree (unwrapping DMP/DDP/FSDP at each hop); an
    ``nn.Embedding``/``nn.EmbeddingBag`` constructed with ``sparse=True``
    contributes its ``weight``. Returns ``destination`` for chaining.
    """
    module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        # Sharded modules manage their own optimizer state; nothing to report.
        pass
    elif isinstance(module, (nn.Embedding, nn.EmbeddingBag)):
        # Merged branch: both embedding flavors expose the same `sparse` flag
        # and `weight` parameter (previously two duplicated elif arms).
        if module.sparse:
            destination.append(append_prefix(prefix, "weight"))
    else:
        for name, child in module.named_children():
            self._sparse_grad_parameter_names(
                child, destination, append_prefix(prefix, name)
            )
    return destination
# pyre-ignore [14]
def state_dict(
    self,
    destination: Optional[Dict[str, Any]] = None,
    prefix: str = "",
    keep_vars: bool = False,
) -> Dict[str, Any]:
    # Delegate to the wrapped module (keeping DDP/FSDP state_dict overrides),
    # then strip the DDP key prefix so keys match the unwrapped model.
    # NOTE(review): _DDP_STATE_DICT_PREFIX is defined elsewhere in the file;
    # presumably "module." -- confirm against the original source.
    state_dict = get_module(self).state_dict(
        destination=destination, prefix=prefix, keep_vars=keep_vars
    )
    torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
        state_dict, prefix + _DDP_STATE_DICT_PREFIX
    )
    # Re-apply the caller's prefix after the DDP prefix removal.
    add_prefix_to_state_dict(state_dict, prefix)
    return state_dict
# pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`
#  inconsistently.
def load_state_dict(
    self,
    state_dict: "OrderedDict[str, torch.Tensor]",
    prefix: str = "",
    strict: bool = True,
) -> _IncompatibleKeys:
    # Entry point: recursively route slices of the state dict to sharded
    # modules and plain nn.Modules (see _load_state_dict).
    return self._load_state_dict(self, state_dict, prefix, strict)
def _load_state_dict(
    self,
    module: nn.Module,
    state_dict: "OrderedDict[str, torch.Tensor]",
    prefix: str = "",
    strict: bool = True,
) -> _IncompatibleKeys:
    """Recursively load ``state_dict`` into ``module``, delegating to sharded modules."""
    missing_keys = []
    unexpected_keys = []
    module = get_module(module)
    if isinstance(module, DistributedDataParallel):
        # Translate keys into the DDP-prefixed namespace the wrapper expects.
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            state_dict, prefix
        )
        add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
    if isinstance(module, ShardedModule):
        # Sharded modules know how to consume their (sharded) tensors.
        return module.load_state_dict(state_dict, strict=strict)
    else:
        # Load this module's own tensors, then recurse per child with the
        # state dict filtered down to that child's key subtree.
        module._load_from_state_dict(
            state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
        )
        for name, child in module.named_children():
            m_keys, u_keys = self._load_state_dict(
                child,
                filter_state_dict(state_dict, prefix + name),
                "",
                strict,
            )
            missing_keys.extend(m_keys)
            unexpected_keys.extend(u_keys)
    return _IncompatibleKeys(
        missing_keys=missing_keys, unexpected_keys=unexpected_keys
    )
def _named_parameters(
    self,
    module: nn.Module,
    prefix: str = "",
    recurse: bool = True,
    strip_ddp: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    # Yields (name, parameter) pairs; ShardedModule instances enumerate their
    # own parameters, everything else is walked child-by-child so each hop can
    # strip DDP/FSDP wrappers (when strip_ddp is set).
    if strip_ddp:
        module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.named_parameters(prefix, recurse)
    else:
        yield from module.named_parameters(prefix, recurse=False)
        for name, child in module.named_children():
            yield from self._named_parameters(
                child,
                append_prefix(prefix, name),
                recurse,
                strip_ddp,
            )
def named_parameters(
    self,
    prefix: str = "",
    recurse: bool = True,
    remove_duplicate: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    """Yield (name, parameter) pairs over the wrapped module.

    With ``remove_duplicate`` set, a parameter object reachable under several
    names is yielded only once.
    """
    seen = set()
    for name, param in self._named_parameters(self.module, prefix, recurse):
        if param in seen:
            continue
        if remove_duplicate:
            seen.add(param)
        yield name, param
def bare_named_parameters(
    self,
    prefix: str = "",
    recurse: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
    """Yield (name, parameter) pairs, always de-duplicating by identity."""
    seen = set()
    for name, param in self._named_parameters(self.module, prefix, recurse):
        if param not in seen:
            seen.add(param)
            yield name, param
def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
    """Yield fully-qualified names of sharded parameters under ``module``."""
    module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.sharded_parameter_names(prefix)
        return
    for child_name, child in module.named_children():
        yield from DistributedModelParallel._sharded_parameter_names(
            child, append_prefix(prefix, child_name)
        )
def _named_buffers(
    self, module: nn.Module, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
    """Yield (name, buffer) pairs, delegating to sharded modules."""
    module = get_unwrapped_module(module)
    if isinstance(module, ShardedModule):
        yield from module.named_buffers(prefix, recurse)
        return
    # Own buffers only; children are visited manually so each hop unwraps.
    yield from module.named_buffers(prefix, recurse=False)
    for child_name, child in module.named_children():
        yield from self._named_buffers(
            child, append_prefix(prefix, child_name), recurse
        )
def named_buffers(
    self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
    # De-duplicate by buffer identity when remove_duplicate is set; with
    # remove_duplicate=False the memo stays empty so every name is yielded.
    gen = self._named_buffers(self.module, prefix, recurse)
    memo = set()
    for key, param in gen:
        if param in memo:
            continue
        if remove_duplicate:
            memo.add(param)
        yield key, param
def fused_optimizer(self) -> KeyedOptimizer:
    # Combined optimizer over all fused-optimizer submodules.
    # NOTE(review): presumably a @property in the original source.
    return self._optim
def plan(self) -> ShardingPlan:
    # The active sharding plan. NOTE(review): presumably a @property.
    return self._plan
def _reset_parameters(module: nn.Module) -> None:
    # Re-run parameter initialization on every submodule that supports it.
    for _, m in module.named_modules():
        if hasattr(m, "reset_parameters"):
            m.reset_parameters()
The provided code snippet includes necessary dependencies for implementing the `get_module` function. Write a Python function `def get_module(module: nn.Module) -> nn.Module` to solve the following problem:
Unwraps the DMP module. It does not unwrap data-parallel wrappers (i.e. DDP/FSDP), so implementations overridden by those wrappers can still be used.
Here is the function:
def get_module(module: nn.Module) -> nn.Module:
    """
    Unwraps DMP module.
    Does not unwrap data parallel wrappers (i.e. DDP/FSDP), so overriding
    implementations by the wrappers can be used.
    """
    # Only DMP layers are peeled; DDP/FSDP wrappers are left intact on purpose.
    if isinstance(module, DistributedModelParallel):
        return get_module(module._dmp_wrapped_module)
    return module
from typing import List, Tuple
from torchrec.distributed.quant_embeddingbag import ShardedQuantEmbeddingBagCollection
class ShardedQuantEmbeddingBagCollection(
    ShardedQuantEmbeddingModuleState[
        ListOfKJTList,
        List[List[torch.Tensor]],
        KeyedTensor,
        NullShardedModuleContext,
    ],
):
    # NOTE(review): in this extract the method bodies appear below, apparently
    # de-indented to module level; confirm structure against the original source.
    """
    Sharded implementation of `EmbeddingBagCollection`.
    This is part of the public API to allow for manual data dist pipelining.
    """
def __init__(
    self,
    module: EmbeddingBagCollectionInterface,
    table_name_to_parameter_sharding: Dict[str, ParameterSharding],
    env: ShardingEnv,
    fused_params: Optional[Dict[str, Any]] = None,
    device: Optional[torch.device] = None,
) -> None:
    """Shard the quantized EBC's tables per ``table_name_to_parameter_sharding``."""
    super().__init__()
    self._embedding_bag_configs: List[EmbeddingBagConfig] = (
        module.embedding_bag_configs()
    )
    # Group the tables' sharding infos by sharding type (TW/RW/CW/...).
    self._sharding_type_to_sharding_infos: Dict[
        str, List[EmbeddingShardingInfo]
    ] = create_sharding_infos_by_sharding(
        module, table_name_to_parameter_sharding, "embedding_bags.", fused_params
    )
    # One inference-time sharding object per sharding type.
    self._sharding_type_to_sharding: Dict[
        str,
        EmbeddingSharding[
            NullShardingContext,
            KJTList,
            List[torch.Tensor],
            torch.Tensor,
        ],
    ] = {
        sharding_type: create_infer_embedding_bag_sharding(
            sharding_type, embedding_confings, env
        )
        for sharding_type, embedding_confings in self._sharding_type_to_sharding_infos.items()
    }
    self._device = device
    self._is_weighted: bool = module.is_weighted()
    self._input_dists: List[nn.Module] = []
    self._lookups: List[nn.Module] = []
    self._create_lookups(fused_params, device)
    # Ensure output dist is set for post processing from an inference runtime (ie. setting device from runtime).
    self._output_dists: torch.nn.ModuleList = torch.nn.ModuleList()
    self._embedding_names: List[str] = []
    self._embedding_dims: List[int] = []
    self._feature_splits: List[int] = []
    self._features_order: List[int] = []
    # forward pass flow control: dists are built lazily on first input_dist().
    self._has_uninitialized_input_dist: bool = True
    self._has_uninitialized_output_dist: bool = True
    self._has_features_permute: bool = True
    tbes: Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig] = (
        get_tbes_to_register_from_iterable(self._lookups)
    )
    self._tbes_configs: Dict[
        IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig
    ] = tbes
    # Optional registration of TBEs for model post processing utilities
    if is_fused_param_register_tbe(fused_params):
        self.tbes: torch.nn.ModuleList = torch.nn.ModuleList(tbes.keys())
    quant_state_dict_split_scale_bias = (
        is_fused_param_quant_state_dict_split_scale_bias(fused_params)
    )
    if quant_state_dict_split_scale_bias:
        self._initialize_torch_state(
            tbes=tbes,
            table_name_to_parameter_sharding=table_name_to_parameter_sharding,
            tables_weights_prefix="embedding_bags",
        )
    else:
        # Legacy state-dict layout: only table-wise sharding is supported.
        table_wise_sharded_only: bool = all(
            sharding_type == ShardingType.TABLE_WISE.value
            for sharding_type in self._sharding_type_to_sharding.keys()
        )
        assert (
            table_wise_sharded_only
        ), "ROW_WISE,COLUMN_WISE shardings can be used only in 'quant_state_dict_split_scale_bias' mode, specify fused_params[FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS]=True to __init__ argument"
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        for table in self._embedding_bag_configs:
            self.embedding_bags[table.name] = torch.nn.Module()
        # Re-expose each lookup's weights as buffers named like the original EBC.
        for _sharding_type, lookup in zip(
            self._sharding_type_to_sharding.keys(), self._lookups
        ):
            lookup_state_dict = lookup.state_dict()
            for key in lookup_state_dict:
                if key.endswith(".weight"):
                    table_name = key[: -len(".weight")]
                    self.embedding_bags[table_name].register_buffer(
                        "weight", lookup_state_dict[key]
                    )
def tbes_configs(
    self,
) -> Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig]:
    # Mapping of registered TBE modules to their grouped configs.
    return self._tbes_configs
def sharding_type_to_sharding_infos(self) -> Dict[str, List[EmbeddingShardingInfo]]:
    # Sharding infos grouped by sharding type (computed in __init__).
    return self._sharding_type_to_sharding_infos
def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
    # Table configs of the original (unsharded) EmbeddingBagCollection.
    return self._embedding_bag_configs
def _create_input_dist(
    self,
    input_feature_names: List[str],
    features_device: torch.device,
    input_dist_device: Optional[torch.device] = None,
) -> None:
    """Build per-sharding input dists and the feature permutation order."""
    feature_names: List[str] = []
    for sharding in self._sharding_type_to_sharding.values():
        self._input_dists.append(
            sharding.create_input_dist(device=input_dist_device)
        )
        feature_names.extend(sharding.feature_names())
        self._feature_splits.append(len(sharding.feature_names()))
    if feature_names == input_feature_names:
        # Input already arrives in sharding order; no permutation needed.
        self._has_features_permute = False
    else:
        # Record the index permutation that reorders incoming features into
        # the order expected by the shardings.
        for f in feature_names:
            self._features_order.append(input_feature_names.index(f))
        self.register_buffer(
            "_features_order_tensor",
            torch.tensor(
                self._features_order, device=features_device, dtype=torch.int32
            ),
            persistent=False,
        )
def _create_lookups(
self,
fused_params: Optional[Dict[str, Any]],
device: Optional[torch.device] = None,
) -> None:
for sharding in self._sharding_type_to_sharding.values():
self._lookups.append(
sharding.create_lookup(
device=device,
fused_params=fused_params,
)
)
def _create_output_dist(self, device: Optional[torch.device] = None) -> None:
for sharding in self._sharding_type_to_sharding.values():
self._output_dists.append(sharding.create_output_dist(device))
self._embedding_names.extend(sharding.embedding_names())
self._embedding_dims.extend(sharding.embedding_dims())
# pyre-ignore [14]
# pyre-ignore
def input_dist(
    self, ctx: NullShardedModuleContext, features: KeyedJaggedTensor
) -> ListOfKJTList:
    """Lazily initialize dists on first call, then permute/split the features."""
    if self._has_uninitialized_input_dist:
        self._create_input_dist(
            features.keys(),
            features.device(),
            self._device,
        )
        self._has_uninitialized_input_dist = False
    if self._has_uninitialized_output_dist:
        self._create_output_dist(features.device())
        self._has_uninitialized_output_dist = False
    with torch.no_grad():
        if self._has_features_permute:
            features = features.permute(
                self._features_order,
                self._features_order_tensor,
            )
        else:
            features = flatten_feature_lengths(features)
        # One feature split per sharding type (skip split when there is one).
        features_by_shards = (
            [features]
            if len(self._feature_splits) == 1
            else features.split(self._feature_splits)
        )
        return ListOfKJTList(
            [
                self._input_dists[i].forward(features_by_shards[i])
                for i in range(len(self._input_dists))
            ]
        )
def compute(
    self,
    ctx: NullShardedModuleContext,
    dist_input: ListOfKJTList,
) -> List[List[torch.Tensor]]:
    """Run each lookup on its slice of the distributed input."""
    # Explicit-index loop kept equivalent to the original list comprehension
    # (which noted torchscript-friendly syntax).
    outputs: List[List[torch.Tensor]] = []
    for i in range(len(self._lookups)):
        outputs.append(self._lookups[i].forward(dist_input[i]))
    return outputs
# pyre-ignore
def output_dist(
    self,
    ctx: NullShardedModuleContext,
    output: List[List[torch.Tensor]],
) -> KeyedTensor:
    # Run every per-sharding output dist and stitch the results into a single
    # KeyedTensor keyed by embedding name.
    return construct_output_kt(
        embeddings=[
            dist.forward(output[i]) for i, dist in enumerate(self._output_dists)
        ],
        embedding_dims=self._embedding_dims,
        embedding_names=self._embedding_names,
    )
# pyre-ignore
def compute_and_output_dist(
    self, ctx: NullShardedModuleContext, input: ListOfKJTList
) -> KeyedTensor:
    # Fused lookup + output dist, used by forward().
    return self.output_dist(ctx, self.compute(ctx, input))
# pyre-ignore
def forward(self, *input, **kwargs) -> KeyedTensor:
    """Run input dist, lookup, and output dist as one call."""
    context = self.create_context()
    distributed = self.input_dist(context, *input, **kwargs)
    return self.compute_and_output_dist(context, distributed)
def copy(self, device: torch.device) -> nn.Module:
    # Build output dists eagerly so the copied module is fully initialized.
    if self._has_uninitialized_output_dist:
        self._create_output_dist(device)
        self._has_uninitialized_output_dist = False
    return super().copy(device)
def shardings(self) -> Dict[str, FeatureShardingMixIn]:
    # Expose the per-sharding-type sharding objects for introspection.
    # pyre-ignore [7]
    return self._sharding_type_to_sharding
def create_context(self) -> NullShardedModuleContext:
    # Per-forward context object; None when compiling under dynamo (see below).
    if is_torchdynamo_compiling():
        # Context creation is not supported by dynamo yet.
        # Context is not needed for TW sharding =>
        # Unblocking dynamo TW with None.
        # pyre-ignore
        return None
    return NullShardedModuleContext()
def get_tbe_specs_from_sqebc(
    sqebc: ShardedQuantEmbeddingBagCollection,
) -> List[
    Tuple[str, int, int, str, str]
]:  # # tuple of (feature_names, rows, dims, str(SparseType), str(EmbeddingLocation/placement))
    """Flatten (name, rows, dims, sparse-type, placement) specs out of all TBEs."""
    return [
        (spec[0], spec[1], spec[2], str(spec[3]), str(spec[4]))
        for lookup in sqebc._lookups
        for lookup_per_rank in lookup._embedding_lookups_per_rank
        for emb_module in lookup_per_rank._emb_modules
        for spec in emb_module._emb_module.embedding_specs
    ]
import abc
import copy
import uuid
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import torch
from torch import distributed as dist, nn
from torchrec.distributed.dist_data import (
KJTAllToAllTensorsAwaitable,
SplitsAllToAllAwaitable,
)
from torchrec.distributed.embedding_dim_bucketer import (
EmbDimBucketer,
EmbDimBucketerPolicy,
should_do_dim_bucketing,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardMetadata,
)
from torchrec.fx.utils import assert_fx_safe
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
torch.fx.wrap("len")
def _fx_wrap_tensor_to_device_dtype(
t: torch.Tensor, tensor_device_dtype: torch.Tensor
) -> torch.Tensor:
return t.to(device=tensor_device_dtype.device, dtype=tensor_device_dtype.dtype)
def _fx_wrap_batch_size_per_feature(kjt: KeyedJaggedTensor) -> Optional[torch.Tensor]:
    """Per-key batch sizes as a tensor, or None when the stride is uniform."""
    if not kjt.variable_stride_per_key():
        return None
    return torch.tensor(
        kjt.stride_per_key(), device=kjt.device(), dtype=kjt.lengths().dtype
    )
def _fx_wrap_max_B(kjt: KeyedJaggedTensor) -> int:
    """Largest per-key stride for variable-stride KJTs; -1 otherwise."""
    if kjt.variable_stride_per_key():
        return max(kjt.stride_per_key())
    return -1
def _fx_wrap_stride(kjt: KeyedJaggedTensor) -> Optional[int]:
    """Uniform stride of the KJT, or None when the stride varies per key."""
    return kjt.stride() if not kjt.variable_stride_per_key() else None
def _fx_wrap_stride_per_key_per_rank(
    kjt: KeyedJaggedTensor, num_buckets: int
) -> Optional[List[List[int]]]:
    """Repeat the stride-per-key-per-rank table once per bucket (variable stride only)."""
    if not kjt.variable_stride_per_key():
        return None
    return kjt.stride_per_key_per_rank() * num_buckets
def _fx_wrap_gen_list_n_times(ls: List[str], n: int) -> List[str]:
# Syntax for dynamo (instead of generator kjt.keys() * num_buckets)
ret: List[str] = []
for _ in range(n):
ret.extend(ls)
return ret
# pyre-ignore
def assert_fx_safe(condition: bool, message: str) -> None:
    # Assert only when not FX-tracing, so the check does not get baked into a
    # traced graph. NOTE(review): `is_fx_tracing` is not imported in this
    # extract -- presumably torch.fx's is_fx_tracing; confirm against the
    # original module.
    if not is_fx_tracing():
        assert condition, message
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
self,
keys: List[str],
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
# Below exposed to ensure torch.script-able
length_per_key: Optional[List[int]] = None,
offset_per_key: Optional[List[int]] = None,
index_per_key: Optional[Dict[str, int]] = None,
jt_dict: Optional[Dict[str, JaggedTensor]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
self._keys: List[str] = keys
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
self._stride_per_key_per_rank: List[List[int]] = []
self._stride_per_key: List[int] = []
self._variable_stride_per_key: bool = False
self._stride: int = -1
if stride_per_key_per_rank is not None:
if stride is not None:
raise ValueError(
"Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
)
self._stride_per_key_per_rank = stride_per_key_per_rank
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
self._variable_stride_per_key = True
if not stride_per_key_per_rank:
self._stride = 0
elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
self._stride = self.stride_per_key()[0]
else:
if torch.jit.is_tracing():
stride = _maybe_compute_stride_kjt_scripted(
keys, stride, lengths, offsets
)[0]
else:
stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
self._stride = stride
self._stride_per_key_per_rank = [[stride]] * len(self._keys)
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
# lazy fields
self._length_per_key: Optional[List[int]] = length_per_key
self._offset_per_key: Optional[List[int]] = offset_per_key
self._index_per_key: Optional[Dict[str, int]] = index_per_key
self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
inverse_indices
)
self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def concat(
kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
if len(kjt_list) == 0:
raise ValueError("Can't concat empty KJT list")
is_weighted: bool = kjt_list[0].weights_or_none() is not None
has_length_per_key: bool = True
length_per_key: List[int] = []
keys: List[str] = []
value_list: List[torch.Tensor] = []
weight_list: List[torch.Tensor] = []
length_list: List[torch.Tensor] = []
stride_per_key_per_rank: List[List[int]] = []
stride: Optional[int] = None
variable_stride_per_key_list = [
kjt.variable_stride_per_key() for kjt in kjt_list
]
assert all(variable_stride_per_key_list) or not any(
variable_stride_per_key_list
), "variable stride per key must be consistent for all KJTs"
variable_stride_per_key = all(variable_stride_per_key_list)
for kjt in kjt_list:
curr_is_weighted: bool = kjt.weights_or_none() is not None
if is_weighted != curr_is_weighted:
raise ValueError("Can't merge weighted KJT with unweighted KJT")
_length_per_key: Optional[List[int]] = None
if kjt._length_per_key is None:
has_length_per_key = False
else:
_length_per_key = kjt._length_per_key
if has_length_per_key and _length_per_key is not None:
length_per_key += _length_per_key
keys += kjt.keys()
value_list.append(kjt.values())
if is_weighted:
weight_list.append(kjt.weights())
length_list.append(kjt.lengths())
if variable_stride_per_key:
stride_per_key_per_rank += kjt.stride_per_key_per_rank()
elif stride is None:
stride = kjt.stride()
else:
assert stride == kjt.stride(), "strides must be consistent for all KJTs"
return KeyedJaggedTensor(
keys=keys,
values=torch.cat(value_list, dim=0),
weights=torch.cat(weight_list, dim=0) if is_weighted else None,
lengths=torch.cat(length_list, dim=0),
stride=stride,
stride_per_key_per_rank=(
stride_per_key_per_rank if variable_stride_per_key else None
),
length_per_key=length_per_key if has_length_per_key else None,
)
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """Construct an empty KJT mirroring ``kjt``'s dtypes, device and stride mode.

    The result has no keys and zero-length tensors, but preserves whether the
    input uses a fixed stride or a variable stride per key, and whether it is
    weighted.
    """
    # Exactly one of (stride, stride_per_key_per_rank) is carried over,
    # depending on the input's stride mode.
    stride, stride_per_key_per_rank = (
        (None, kjt.stride_per_key_per_rank())
        if kjt.variable_stride_per_key()
        else (kjt.stride(), None)
    )
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
        weights=(
            None
            if kjt.weights_or_none() is None
            else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
        ),
        lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.
    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.
    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.
    Consider the visualization of the following KeyedJaggedTensor:
    # 0 1 2 <-- dim_1
    # "Feature0" [V0,V1] None [V2]
    # "Feature1" [V3] [V4] [V5,V6,V7]
    # ^
    # dim_0
    Notice that the inputs for this KeyedJaggedTensor would have looked like:
    values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
    weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
    lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
    offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
    keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
    index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
    offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
    Now if the input jt_dict = {
    # "Feature0" [V0,V1] [V2]
    # "Feature1" [V3] [V4] [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.
    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    stride_per_key: List[int] = []
    # Each JT's lengths tensor has one entry per batch element, so its size
    # is that key's stride (batch size).
    for jt in jt_dict.values():
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # If every key has the same batch size, build a fixed-stride KJT;
    # otherwise fall back to variable stride with one (single-rank) stride
    # list per key.
    # NOTE(review): stride_per_key[0] assumes jt_dict is non-empty — an empty
    # dict would raise IndexError here; confirm callers never pass {}.
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
def sync(self) -> "KeyedJaggedTensor":
    """Eagerly compute and cache length_per_key and offset_per_key; returns self."""
    self.length_per_key()
    self.offset_per_key()
    return self
def unsync(self) -> "KeyedJaggedTensor":
    """Drop the cached length_per_key / offset_per_key; returns self."""
    self._length_per_key = None
    self._offset_per_key = None
    return self
def device(self) -> torch.device:
    """Device of the values tensor (all component tensors are expected there)."""
    return self._values.device
def lengths(self) -> torch.Tensor:
    """Lengths tensor; derived from offsets on first access and cached."""
    _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = _lengths
    return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Cached lengths without forcing computation; may be None."""
    return self._lengths
def offsets(self) -> torch.Tensor:
    """Offsets tensor; derived from lengths on first access and cached."""
    _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = _offsets
    return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Cached offsets without forcing computation; may be None."""
    return self._offsets
def keys(self) -> List[str]:
    """Feature keys, in storage order."""
    return self._keys
def values(self) -> torch.Tensor:
    """Flat values tensor concatenated across all keys."""
    return self._values
def weights(self) -> torch.Tensor:
    """Weights tensor; raises (via helper) if this KJT is unweighted."""
    return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
    """Weights tensor, or None for an unweighted KJT."""
    return self._weights
def stride(self) -> int:
    """Fixed batch size shared by all keys (fixed-stride mode)."""
    return self._stride
def stride_per_key(self) -> List[int]:
    """Batch size for each key, in key order."""
    return self._stride_per_key
def stride_per_key_per_rank(self) -> List[List[int]]:
    """Per-key list of per-rank batch sizes (variable-stride mode)."""
    return self._stride_per_key_per_rank
def variable_stride_per_key(self) -> bool:
    """True when keys may have different (per-rank) batch sizes."""
    return self._variable_stride_per_key
def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    """(keys, indices) for de-duplicated inputs; raises (via helper) if absent."""
    return _get_inverse_indices_or_throw(self._inverse_indices)
def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    """Inverse indices if present, else None."""
    return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
    """Map key -> position in ``self._keys``; computed lazily and cached."""
    _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
        self._keys,
        self._index_per_key,
    )
    self._index_per_key = _index_per_key
    return _index_per_key
def length_per_key(self) -> List[int]:
    """Total number of values per key; computed lazily and cached."""
    _length_per_key = _maybe_compute_length_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    return _length_per_key
def length_per_key_or_none(self) -> Optional[List[int]]:
    """Cached length_per_key without forcing computation; may be None."""
    return self._length_per_key
def offset_per_key(self) -> List[int]:
    """Cumulative value offsets per key (len(keys) + 1 entries); lazily cached.

    Also refreshes the length_per_key cache as a side effect, since both are
    produced by the same helper.
    """
    _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    self._offset_per_key = _offset_per_key
    return _offset_per_key
def offset_per_key_or_none(self) -> Optional[List[int]]:
    """Cached offset_per_key without forcing computation; may be None."""
    return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
    """Cumulative sum of stride_per_key: start index of each key in lengths()."""
    if not self._lengths_offset_per_key:
        self._lengths_offset_per_key = _cumsum(self.stride_per_key())
    return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    """Split this KJT into consecutive KJTs of ``segments[i]`` keys each.

    ``sum(segments)`` is expected to equal ``len(self.keys())``. Segments of
    size 0 and a single segment covering all keys are special-cased to avoid
    tensor slicing.
    """
    split_list: List[KeyedJaggedTensor] = []
    start = 0          # first key index of the current segment
    start_offset = 0   # first value index of the current segment
    _length_per_key = self.length_per_key()
    _offset_per_key = self.offset_per_key()
    for segment in segments:
        end = start + segment
        end_offset = _offset_per_key[end]
        keys: List[str] = self._keys[start:end]
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank()[start:end])
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        if segment == len(self._keys):
            # no torch slicing required
            split_list.append(
                KeyedJaggedTensor(
                    keys=self._keys,
                    values=self._values,
                    weights=self.weights_or_none(),
                    lengths=self._lengths,
                    offsets=self._offsets,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=self._length_per_key,
                    offset_per_key=self._offset_per_key,
                    index_per_key=self._index_per_key,
                    jt_dict=self._jt_dict,
                    inverse_indices=None,
                )
            )
        elif segment == 0:
            # Empty segment: build a zero-length KJT on the same device.
            empty_int_list: List[int] = torch.jit.annotate(List[int], [])
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=torch.tensor(
                        empty_int_list,
                        device=self.device(),
                        dtype=self._values.dtype,
                    ),
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self.weights().dtype,
                        )
                    ),
                    lengths=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    offsets=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=None,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        else:
            # General case: slice values/weights by value offsets and lengths
            # by the per-key lengths offsets.
            split_length_per_key = _length_per_key[start:end]
            if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                # Checks for dynamo dynamic shapes tracing
                torch._check_is_size(start_offset)
                torch._check_is_size(end_offset)
                torch._check_is_size(end_offset - start_offset)
                torch._check(start_offset <= self._values.size(0))
                torch._check(end_offset <= self._values.size(0))
                torch._check(end_offset >= start_offset)
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=self._values[start_offset:end_offset],
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else self.weights()[start_offset:end_offset]
                    ),
                    lengths=self.lengths()[
                        self.lengths_offset_per_key()[
                            start
                        ] : self.lengths_offset_per_key()[end]
                    ],
                    offsets=None,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=split_length_per_key,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        start = end
        start_offset = end_offset
    return split_list
def permute(
    self,
    indices: List[int],
    indices_tensor: Optional[torch.Tensor] = None,
    include_inverse_indices: bool = False,
) -> "KeyedJaggedTensor":
    """Reorder (and/or subset) keys according to ``indices``.

    Args:
        indices (List[int]): positions of the keys to keep, in output order.
        indices_tensor (Optional[torch.Tensor]): same permutation as a tensor;
            built from ``indices`` when not supplied.
        include_inverse_indices (bool): carry the inverse-indices metadata
            over to the result.
    """
    if indices_tensor is None:
        indices_tensor = torch.tensor(
            indices, dtype=torch.int, device=self.device()
        )
    length_per_key = self.length_per_key()
    permuted_keys: List[str] = []
    permuted_stride_per_key_per_rank: List[List[int]] = []
    permuted_length_per_key: List[int] = []
    permuted_lengths_sum = 0
    # NOTE(review): stride_per_key_per_rank() is read here even in
    # fixed-stride mode; its result is only used when variable_stride_per_key
    # is True (see the tuple selection below).
    for index in indices:
        key = self.keys()[index]
        permuted_keys.append(key)
        permuted_stride_per_key_per_rank.append(
            self.stride_per_key_per_rank()[index]
        )
        permuted_length_per_key.append(length_per_key[index])
        permuted_lengths_sum += length_per_key[index]
    if self.variable_stride_per_key():
        # Variable stride: permute lengths by per-key stride segments and
        # values/weights by per-key length segments.
        length_per_key_tensor = _pin_and_move(
            torch.tensor(self.length_per_key()), self.device()
        )
        stride_per_key_tensor = _pin_and_move(
            torch.tensor(self.stride_per_key()), self.device()
        )
        permuted_lengths, _ = _permute_tensor_by_segments(
            self.lengths(),
            stride_per_key_tensor,
            indices_tensor,
            None,
        )
        permuted_values, permuted_weights = _permute_tensor_by_segments(
            self.values(),
            length_per_key_tensor,
            indices_tensor,
            self.weights_or_none(),
        )
    else:
        # Fixed stride: lengths form a dense (num_keys x stride) matrix, so
        # the fused fbgemm 2D permute handles lengths/values/weights at once.
        (
            permuted_lengths,
            permuted_values,
            permuted_weights,
        ) = torch.ops.fbgemm.permute_2D_sparse_data(
            indices_tensor,
            self.lengths().view(len(self._keys), -1),
            self.values(),
            self.weights_or_none(),
            permuted_lengths_sum,
        )
    stride, optional_permuted_stride_per_key_per_rank = (
        (None, permuted_stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    kjt = KeyedJaggedTensor(
        keys=permuted_keys,
        values=permuted_values,
        weights=permuted_weights,
        lengths=permuted_lengths.view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
        length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=(
            self.inverse_indices_or_none() if include_inverse_indices else None
        ),
    )
    return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
    """Return a copy whose lengths tensor is flattened to 1-D.

    Values/weights are shared with this KJT; offsets and most caches are
    dropped (only length_per_key is carried over).
    """
    stride, stride_per_key_per_rank = (
        (None, self.stride_per_key_per_rank())
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values,
        weights=self._weights,
        lengths=self.lengths().view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self.length_per_key(),
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=None,
    )
def __getitem__(self, key: str) -> JaggedTensor:
    """Return the JaggedTensor for ``key`` (views into this KJT's tensors).

    Raises KeyError (from the key-index lookup) if ``key`` is unknown.
    """
    offset_per_key = self.offset_per_key()
    index = self._key_indices()[key]
    start_offset = offset_per_key[index]
    # offset_per_key has len(keys) + 1 entries, so index + 1 is normally in
    # range; the fallback to start_offset yields an empty slice.
    end_offset = (
        offset_per_key[index + 1]
        if index + 1 < len(offset_per_key)
        else start_offset
    )
    return JaggedTensor(
        values=self._values[start_offset:end_offset],
        weights=(
            None
            if self.weights_or_none() is None
            else self.weights()[start_offset:end_offset]
        ),
        # Lengths are sliced by this key's segment of the flat lengths tensor.
        lengths=self.lengths()[
            self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                index + 1
            ]
        ],
        offsets=None,
    )
def to_dict(self) -> Dict[str, JaggedTensor]:
    """Convert to {key: JaggedTensor}; the result is computed once and cached."""
    _jt_dict = _maybe_compute_kjt_to_jt_dict(
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        keys=self.keys(),
        length_per_key=self.length_per_key(),
        lengths=self.lengths(),
        values=self.values(),
        variable_stride_per_key=self.variable_stride_per_key(),
        weights=self.weights_or_none(),
        jt_dict=self._jt_dict,
    )
    self._jt_dict = _jt_dict
    return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Mark all component tensors as used on ``stream``.

    Prevents the CUDA caching allocator from reusing their memory before
    work queued on ``stream`` has finished (see Tensor.record_stream).
    """
    self._values.record_stream(stream)
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    if weights is not None:
        weights.record_stream(stream)
    if lengths is not None:
        lengths.record_stream(stream)
    if offsets is not None:
        offsets.record_stream(stream)
def to(
    self,
    device: torch.device,
    non_blocking: bool = False,
    dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
    """Return a copy with all tensors moved to ``device``.

    Args:
        device (torch.device): target device.
        non_blocking (bool): forwarded to each Tensor.to call.
        dtype (Optional[torch.dtype]): if given, applied to the weights
            tensor only; values/lengths/offsets keep their dtypes.

    Cached metadata (length/offset/index per key, jt_dict) is carried over
    unchanged; inverse indices are moved along with the tensors.
    """
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    length_per_key = self._length_per_key
    offset_per_key = self._offset_per_key
    index_per_key = self._index_per_key
    jt_dict = self._jt_dict
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (
            inverse_indices[0],
            inverse_indices[1].to(device, non_blocking=non_blocking),
        )
    if weights is not None:
        if dtype is not None:
            weights = weights.to(
                dtype=dtype, device=device, non_blocking=non_blocking
            )
        else:
            weights = weights.to(device=device, non_blocking=non_blocking)
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.to(device, non_blocking=non_blocking),
        weights=weights,
        lengths=(
            lengths.to(device, non_blocking=non_blocking)
            if lengths is not None
            else None
        ),
        offsets=(
            offsets.to(device, non_blocking=non_blocking)
            if offsets is not None
            else None
        ),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=length_per_key,
        offset_per_key=offset_per_key,
        index_per_key=index_per_key,
        jt_dict=jt_dict,
        inverse_indices=inverse_indices,
    )
def __str__(self) -> str:
    """Render one labelled jagged-tensor line per key."""
    # Note operator precedence: this is len == 0 OR (offsets is None AND
    # lengths is None) — i.e. nothing printable.
    if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
        return "KeyedJaggedTensor()\n"
    offsets = self.offsets()
    return (
        "KeyedJaggedTensor({\n"
        + ",\n".join(
            [
                "    "
                + _jagged_tensor_string(
                    self._keys[index],
                    self._values,
                    self._weights,
                    offsets,
                    # Each key covers offsets rows [sum(strides before it),
                    # sum(strides through it)).
                    sum(self.stride_per_key()[:index]),
                    sum(self.stride_per_key()[: index + 1]),
                )
                for index in range(len(self._keys))
            ]
        )
        + "\n})\n"
    )
def pin_memory(self) -> "KeyedJaggedTensor":
    """Return a copy with all tensors in pinned (page-locked) host memory.

    The jt_dict cache is dropped (its tensors would not be pinned); other
    cached metadata is carried over.
    """
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.pin_memory(),
        weights=weights.pin_memory() if weights is not None else None,
        lengths=lengths.pin_memory() if lengths is not None else None,
        offsets=offsets.pin_memory() if offsets is not None else None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        jt_dict=None,
        inverse_indices=inverse_indices,
    )
def dist_labels(self) -> List[str]:
    """Names of the tensors exchanged in all-to-all, in dist_tensors order."""
    labels = ["lengths", "values"]
    if self.variable_stride_per_key():
        labels.append("strides")
    if self.weights_or_none() is not None:
        labels.append("weights")
    return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    """Per-rank split sizes for each dist tensor, given key counts per rank."""
    batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
    length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
    splits = [batch_size_per_split, length_per_split]
    if self.variable_stride_per_key():
        splits.append(key_splits)
    if self.weights_or_none() is not None:
        # Weights split exactly like values (one weight per value).
        splits.append(length_per_split)
    return splits
def dist_tensors(self) -> List[torch.Tensor]:
    """The tensors exchanged in all-to-all, matching dist_labels/dist_splits."""
    tensors = [self.lengths(), self.values()]
    if self.variable_stride_per_key():
        strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
        tensors.append(strides)
    if self.weights_or_none() is not None:
        tensors.append(self.weights())
    return tensors
def dist_init(
    keys: List[str],
    tensors: List[torch.Tensor],
    variable_stride_per_key: bool,
    num_workers: int,
    recat: Optional[torch.Tensor],
    stride_per_rank: Optional[List[int]],
    stagger: int = 1,
) -> "KeyedJaggedTensor":
    """Reassemble a KJT from all-to-all output tensors.

    ``tensors`` is [lengths, values, (strides if variable stride),
    (weights if weighted)] — the layout produced by dist_tensors(). ``recat``
    is an optional permutation that reorders data from rank-major to
    key-major layout; ``stagger`` reorders ranks for staggered shuffling.
    The returned KJT is sync()'d.
    """
    assert len(tensors) in [2, 3, 4]
    lengths = tensors[0]
    values = tensors[1]
    stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
    # Weights are the last tensor only when present: 4 tensors in
    # variable-stride mode, 3 in fixed-stride mode.
    weights = (
        tensors[-1]
        if (variable_stride_per_key and len(tensors) == 4)
        or (not variable_stride_per_key and len(tensors) == 3)
        else None
    )
    if variable_stride_per_key:
        assert stride_per_rank_per_key is not None
        # Incoming strides are rank-major; transpose to key-major lists.
        stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
            num_workers, len(keys)
        ).T.tolist()
        strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
            stride_per_rank_per_key
        ).tolist()
        cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
        # Values per (rank, key) segment, from differences of the lengths cumsum
        # at segment boundaries.
        length_per_key = (
            cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
        )
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                lengths, _ = _permute_tensor_by_segments(
                    lengths,
                    stride_per_rank_per_key,
                    recat,
                    None,
                )
                values, weights = _permute_tensor_by_segments(
                    values,
                    length_per_key,
                    recat,
                    weights,
                )
        if not stride_per_key_per_rank:
            # NOTE(review): [[0]] * len(keys) aliases one inner list across
            # all keys — harmless only if never mutated per key.
            stride_per_key_per_rank = [[0]] * len(keys)
        if stagger > 1:
            # Undo staggered rank ordering: regroup each key's per-rank
            # strides by local_world_size-strided slices.
            stride_per_key_per_rank_stagger: List[List[int]] = []
            local_world_size = num_workers // stagger
            for i in range(len(keys)):
                stride_per_rank_stagger: List[int] = []
                for j in range(local_world_size):
                    stride_per_rank_stagger.extend(
                        stride_per_key_per_rank[i][j::local_world_size]
                    )
                stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
            stride_per_key_per_rank = stride_per_key_per_rank_stagger
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
        return kjt.sync()
    else:
        assert stride_per_rank is not None
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                stride = stride_per_rank[0]
                if all(s == stride for s in stride_per_rank):
                    # Uniform batch size per rank: lengths form a dense 2-D
                    # matrix, so the fused 2-D permute applies.
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_2D_sparse_data(
                        recat,
                        lengths.view(-1, stride),
                        values,
                        weights,
                        values.numel(),
                    )
                    lengths = lengths.view(-1)
                else:  # variable batch size per rank
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_1D_sparse_data(
                        recat,
                        lengths.view(-1),
                        values,
                        weights,
                        values.numel(),
                    )
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=sum(stride_per_rank),
        )
        return kjt.sync()
The provided code snippet includes necessary dependencies for implementing the `bucketize_kjt_before_all2all` function. Write a Python function `def bucketize_kjt_before_all2all( kjt: KeyedJaggedTensor, num_buckets: int, block_sizes: torch.Tensor, output_permute: bool = False, bucketize_pos: bool = False, block_bucketize_row_pos: Optional[List[torch.Tensor]] = None, ) -> Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]` to solve the following problem:
Bucketizes the `values` in KeyedJaggedTensor into `num_buckets` buckets, and `lengths` are readjusted based on the bucketization results. Note: This function should be used only for row-wise sharding before calling `KJTAllToAll`. Args: num_buckets (int): number of buckets to bucketize the values into. block_sizes (torch.Tensor): bucket sizes for the keyed dimension. output_permute (bool): output the memory location mapping from the unbucketized values to bucketized values or not. bucketize_pos (bool): output the changed position of the bucketized values or not. block_bucketize_row_pos (Optional[List[torch.Tensor]]): The offsets of shard size for each feature. Returns: Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]: the bucketized `KeyedJaggedTensor` and the optional permute mapping from the unbucketized values to bucketized values.
Here is the function:
def bucketize_kjt_before_all2all(
    kjt: KeyedJaggedTensor,
    num_buckets: int,
    block_sizes: torch.Tensor,
    output_permute: bool = False,
    bucketize_pos: bool = False,
    block_bucketize_row_pos: Optional[List[torch.Tensor]] = None,
) -> Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]:
    """
    Bucketizes the `values` in KeyedJaggedTensor into `num_buckets` buckets,
    and `lengths` are readjusted based on the bucketization results.
    Note: This function should be used only for row-wise sharding before calling
    `KJTAllToAll`.
    Args:
        num_buckets (int): number of buckets to bucketize the values into.
        block_sizes (torch.Tensor): bucket sizes for the keyed dimension.
        output_permute (bool): output the memory location mapping from the unbucketized
            values to bucketized values or not.
        bucketize_pos (bool): output the changed position of the bucketized values or
            not.
        block_bucketize_row_pos (Optional[List[torch.Tensor]]): The offsets of shard size for each feature.
    Returns:
        Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]: the bucketized `KeyedJaggedTensor` and the optional permute mapping from the unbucketized values to bucketized values.
    """
    num_features = len(kjt.keys())
    assert_fx_safe(
        block_sizes.numel() == num_features,
        f"Expecting block sizes for {num_features} features, but {block_sizes.numel()} received.",
    )
    # Match block_sizes' device/dtype to the values tensor for the fbgemm op.
    block_sizes_new_type = _fx_wrap_tensor_to_device_dtype(block_sizes, kjt.values())
    (
        bucketized_lengths,
        bucketized_indices,
        bucketized_weights,
        pos,
        unbucketize_permute,
    ) = torch.ops.fbgemm.block_bucketize_sparse_features(
        kjt.lengths().view(-1),
        kjt.values(),
        bucketize_pos=bucketize_pos,
        sequence=output_permute,
        block_sizes=block_sizes_new_type,
        my_size=num_buckets,
        weights=kjt.weights_or_none(),
        batch_size_per_feature=_fx_wrap_batch_size_per_feature(kjt),
        max_B=_fx_wrap_max_B(kjt),
        block_bucketize_pos=block_bucketize_row_pos,  # each tensor should have the same dtype as kjt.lengths()
    )
    return (
        KeyedJaggedTensor(
            # duplicate keys will be resolved by AllToAll
            keys=_fx_wrap_gen_list_n_times(kjt.keys(), num_buckets),
            values=bucketized_indices,
            # NOTE(review): with bucketize_pos=True the positions are stored in
            # the weights slot, replacing any original weights — confirm callers
            # never need both at once.
            weights=pos if bucketize_pos else bucketized_weights,
            lengths=bucketized_lengths.view(-1),
            offsets=None,
            stride=_fx_wrap_stride(kjt),
            stride_per_key_per_rank=_fx_wrap_stride_per_key_per_rank(kjt, num_buckets),
            length_per_key=None,
            offset_per_key=None,
            index_per_key=None,
        ),
        unbucketize_permute,
    )
9,031 | import abc
import copy
import uuid
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import torch
from torch import distributed as dist, nn
from torchrec.distributed.dist_data import (
KJTAllToAllTensorsAwaitable,
SplitsAllToAllAwaitable,
)
from torchrec.distributed.embedding_dim_bucketer import (
EmbDimBucketer,
EmbDimBucketerPolicy,
should_do_dim_bucketing,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardMetadata,
)
from torchrec.fx.utils import assert_fx_safe
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
CACHE_LOAD_FACTOR_STR: str = "cache_load_factor"
USE_ONE_TBE_PER_TABLE: str = "use_one_tbe_per_table"
def _get_weighted_avg_cache_load_factor(
embedding_tables: List[ShardedEmbeddingTable],
) -> Optional[float]:
"""
Calculate the weighted average cache load factor of all tables. The cache
load factors are weighted by the hash size of each table.
"""
cache_load_factor_sum: float = 0.0
weight: int = 0
for table in embedding_tables:
if (
table.compute_kernel == EmbeddingComputeKernel.FUSED_UVM_CACHING
and table.fused_params
and CACHE_LOAD_FACTOR_STR in table.fused_params
):
cache_load_factor_sum += (
table.fused_params[CACHE_LOAD_FACTOR_STR] * table.num_embeddings
)
weight += table.num_embeddings
# if no fused_uvm_caching tables, return default cache load factor
if weight == 0:
return None
return cache_load_factor_sum / weight
def _get_grouping_fused_params(
fused_params: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""
Only shallow copy the fused params we need for grouping tables into TBEs. In
particular, we do not copy cache_load_factor.
"""
grouping_fused_params: Optional[Dict[str, Any]] = copy.copy(fused_params)
if not grouping_fused_params:
return grouping_fused_params
if CACHE_LOAD_FACTOR_STR in grouping_fused_params:
del grouping_fused_params[CACHE_LOAD_FACTOR_STR]
if grouping_fused_params.get(USE_ONE_TBE_PER_TABLE, False):
# Replace with unique value to force it into singleton group.
grouping_fused_params[USE_ONE_TBE_PER_TABLE] = str(uuid.uuid4())
return grouping_fused_params
def _get_compute_kernel_type(
    compute_kernel: EmbeddingComputeKernel,
) -> EmbeddingComputeKernel:
    """Collapse UVM kernel variants onto their base compute kernel.

    FUSED_UVM / FUSED_UVM_CACHING map to FUSED; QUANT_UVM /
    QUANT_UVM_CACHING map to QUANT; anything else is returned unchanged.
    """
    if compute_kernel in (
        EmbeddingComputeKernel.FUSED_UVM,
        EmbeddingComputeKernel.FUSED_UVM_CACHING,
    ):
        return EmbeddingComputeKernel.FUSED
    if compute_kernel in (
        EmbeddingComputeKernel.QUANT_UVM,
        EmbeddingComputeKernel.QUANT_UVM_CACHING,
    ):
        return EmbeddingComputeKernel.QUANT
    return compute_kernel
class EmbDimBucketerPolicy(Enum):
    """
    Config to specify how to bucketize embedding tables based on dimensions.

    SINGLE_BUCKET: all embedding tables are put into a single bucket.
    ALL_BUCKETS: tables with the same dim size share a bucket.
    CACHELINE_BUCKETS: tables with the same dim cacheline size share a bucket.
    """
    SINGLE_BUCKET = "single_bucket"
    ALL_BUCKETS = "all_buckets"
    CACHELINE_BUCKETS = "cacheline_buckets"
class EmbDimBucketer:
    """
    Buckets embedding dimensions into different groups based on their sizes. This is intended to be leveraged
    once planning is done, and at the sharding stage, per rank.
    The rationale to use bucketization is
    - When UVM_CACHING is used: FBGEMM Table Batched Embedding Operator supports a software managed cache for the embeddings placed on UVM (Host memory).
    However, the cache uses maximum embedding dim of all the tables batched in the operator as its unit of allocation. This results in wasted HBM memory,
    and higher miss rate, hence lower performance. Bucketizing can address this issue, allowing for higher effective cache size and better performance.
    - When all tables are placed on HBM: When tables with widely different embedding dimension are batched together, the register allocation in GPU will
    be mainly decided by the table with largest embedding dimension. This can lead to worse performance due to lower number of threads and lower occupancy.
    Note that Column wise sharding also to some extent addresses this problem, but has its own limitations.
    Generally, we expect the CACHELINE_BUCKETS policy perform better than ALL_BUCKETS, as it addresses the main issues and limits the number of buckets.
    Args:
        embedding_tables (List[ShardedEmbeddingTable]): list of sharded embedding
        cfg (EmbDimBucketerPolicy): Bucketing policy
    Raises:
        AssertionError: if ``cfg`` is not a known policy.
    Example:
        emb_dim_bucketer = EmbDimBucketer(embedding_tables, EmbDimBucketerPolicy.SINGLE_BUCKET)
        ...
        bucket = emb_dim_bucketer.get_bucket(embedding_tables[0], embedding_tables[0].data_type) # bucket table 0 is assigned to.
    """
    def __init__(
        self, embedding_tables: List[ShardedEmbeddingTable], cfg: EmbDimBucketerPolicy
    ) -> None:
        # Kept for backward compatibility; bucket lookups use emb_dim_buckets.
        self.embedding_dim_buckets: Dict[int, int] = {}
        self.num_buckets = 1
        # Cacheline size in bytes, used by the CACHELINE_BUCKETS policy.
        self.cacheline = 128
        if cfg == EmbDimBucketerPolicy.CACHELINE_BUCKETS:
            self.emb_dim_buckets: Dict[int, int] = self.cacheline_emb_buckets(
                embedding_tables
            )
        elif cfg == EmbDimBucketerPolicy.ALL_BUCKETS:
            self.emb_dim_buckets: Dict[int, int] = self.all_emb_buckets(
                embedding_tables
            )
        elif cfg == EmbDimBucketerPolicy.SINGLE_BUCKET:
            self.emb_dim_buckets: Dict[int, int] = self.single_emb_bucket(
                embedding_tables
            )
        else:
            # BUGFIX: the AssertionError was previously constructed but never
            # raised, leaving emb_dim_buckets unset and failing later with an
            # opaque AttributeError.
            raise AssertionError(f"Invalid bucketization config {cfg}")
    def bucket_count(self) -> int:
        """Number of buckets produced by the configured policy."""
        return self.num_buckets
    def get_bucket(self, embedding_dim: int, dtype: DataType) -> int:
        """Bucket id for a table of ``embedding_dim`` columns of ``dtype``."""
        if self.num_buckets == 1:
            return 0
        else:
            return self.bucket(embedding_dim, dtype)
    def single_emb_bucket(
        self,
        embedding_tables: List[ShardedEmbeddingTable],
    ) -> Dict[int, int]:
        """SINGLE_BUCKET policy: every dim maps to bucket 0."""
        buckets: Dict[int, int] = {}
        bucket_id = 0
        for table in embedding_tables:
            dim_in_bytes = self.dim_in_bytes(table.embedding_dim, table.data_type)
            buckets[dim_in_bytes] = bucket_id
        self.num_buckets = 1
        return buckets
    def all_emb_buckets(
        self,
        embedding_tables: List[ShardedEmbeddingTable],
    ) -> Dict[int, int]:
        """ALL_BUCKETS policy: one bucket per distinct dim size (in bytes)."""
        buckets: Dict[int, int] = {}
        bucket_id = -1
        for table in embedding_tables:
            dim_in_bytes = self.dim_in_bytes(table.embedding_dim, table.data_type)
            if dim_in_bytes not in buckets.keys():
                bucket_id += 1
                buckets[dim_in_bytes] = bucket_id
        self.num_buckets = bucket_id + 1  # id starts from 0
        return buckets
    def cacheline_emb_buckets(
        self,
        embedding_tables: List[ShardedEmbeddingTable],
    ) -> Dict[int, int]:
        """CACHELINE_BUCKETS policy: dims sharing a cacheline count share a bucket."""
        buckets: Dict[int, int] = {}
        cl_buckets: Dict[int, int] = {}
        bucket_id = -1
        for table in embedding_tables:
            dim_in_bytes = self.dim_in_bytes(table.embedding_dim, table.data_type)
            cl_dim = dim_in_bytes // self.cacheline
            if cl_dim not in cl_buckets.keys():
                bucket_id += 1
                cl_buckets[cl_dim] = bucket_id
            if dim_in_bytes not in buckets.keys():
                buckets[dim_in_bytes] = cl_buckets[cl_dim]
        self.num_buckets = bucket_id + 1  # id starts from 0
        return buckets
    def bucket(self, dim: int, dtype: DataType) -> int:
        """Look up the bucket id for a (dim, dtype) pair; KeyError if unseen."""
        return self.emb_dim_buckets[self.dim_in_bytes(dim, dtype)]
    def dim_in_bytes(self, dim: int, dtype: DataType) -> int:
        """Embedding row width in bytes for ``dim`` elements of ``dtype``."""
        return dim * DATA_TYPE_NUM_BITS[dtype] // 8
def should_do_dim_bucketing(
    embedding_tables: List[ShardedEmbeddingTable],
) -> bool:
    """
    When embedding memory offloading with caching is enabled, we prefer to
    do dim bucketing for better utilization of cache space. Only applied to
    "prefetch-sparse-dist" training pipeline.
    Currently using the compute kernel to deduct caching is enabled.

    Raises:
        AssertionError: if only a strict subset of the tables is configured
            with the prefetch pipeline (the setting must be all-or-nothing).
    """
    table_pipeline_count = 0
    for table in embedding_tables:
        if (
            table.fused_params is not None
            and "prefetch_pipeline" in table.fused_params
            and table.fused_params["prefetch_pipeline"]
        ):
            table_pipeline_count += 1
    if table_pipeline_count > 0 and table_pipeline_count != len(embedding_tables):
        # BUGFIX: the AssertionError was previously constructed but never
        # raised, so the inconsistency check silently did nothing.
        raise AssertionError(
            f"Only {table_pipeline_count} tables have prefetch-sparse-dist pipeline. It should be all {len(embedding_tables)} tables."
        )
    for table in embedding_tables:
        if (
            table.compute_kernel == EmbeddingComputeKernel.FUSED_UVM_CACHING
            and table_pipeline_count
        ):
            return True
    return False
class ShardedEmbeddingTable(
    ShardedMetaConfig,
    EmbeddingAttributes,
    EmbeddingTableConfig,
):
    """One shard of an embedding table: combines the base table config with
    embedding attributes and per-shard metadata via its base classes."""

    # Extra kernel arguments attached to this shard; presumably forwarded to
    # the backing TBE/compute kernel — confirm against grouping code.
    fused_params: Optional[Dict[str, Any]] = None
class GroupedEmbeddingConfig:
    """Configuration for a group of sharded tables that share data type,
    pooling, weightedness, feature-processor flag, and compute kernel."""

    data_type: DataType
    pooling: PoolingType
    is_weighted: bool
    has_feature_processor: bool
    compute_kernel: EmbeddingComputeKernel
    embedding_tables: List[ShardedEmbeddingTable]
    fused_params: Optional[Dict[str, Any]] = None

    def feature_hash_sizes(self) -> List[int]:
        """One ``num_embeddings`` entry per feature of every table."""
        return [
            table.num_embeddings
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def num_features(self) -> int:
        """Total feature count across all tables in the group."""
        return sum(table.num_features() for table in self.embedding_tables)

    def dim_sum(self) -> int:
        """Sum of local embedding dims weighted by each table's feature count."""
        return sum(
            table.num_features() * table.local_cols
            for table in self.embedding_tables
        )

    def table_names(self) -> List[str]:
        """Names of the grouped tables, in order."""
        return [table.name for table in self.embedding_tables]

    def feature_names(self) -> List[str]:
        """All feature names across the grouped tables, in table order."""
        return [
            name for table in self.embedding_tables for name in table.feature_names
        ]

    def embedding_dims(self) -> List[int]:
        """Local embedding dim repeated once per feature of each table."""
        return [
            table.local_cols
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]

    def embedding_names(self) -> List[str]:
        """All embedding names across the grouped tables, in table order."""
        return [
            name for table in self.embedding_tables for name in table.embedding_names
        ]

    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Each table's local shard metadata, repeated once per feature."""
        return [
            table.local_metadata
            for table in self.embedding_tables
            for _ in table.feature_names
        ]
The provided code snippet includes necessary dependencies for implementing the `group_tables` function. Write a Python function `def group_tables( tables_per_rank: List[List[ShardedEmbeddingTable]], ) -> List[List[GroupedEmbeddingConfig]]` to solve the following problem:
Groups tables by `DataType`, `PoolingType`, and `EmbeddingComputeKernel`. Args: tables_per_rank (List[List[ShardedEmbeddingTable]]): list of sharded embedding tables per rank with consistent weightedness. Returns: List[List[GroupedEmbeddingConfig]]: per rank list of GroupedEmbeddingConfig for features.
Here is the function:
def group_tables(
    tables_per_rank: List[List[ShardedEmbeddingTable]],
) -> List[List[GroupedEmbeddingConfig]]:
    """
    Groups tables by `DataType`, `PoolingType`, and `EmbeddingComputeKernel`.

    Args:
        tables_per_rank (List[List[ShardedEmbeddingTable]]): list of sharded embedding
            tables per rank with consistent weightedness.

    Returns:
        List[List[GroupedEmbeddingConfig]]: per rank list of GroupedEmbeddingConfig for features.
    """

    def _group_tables_per_rank(
        embedding_tables: List[ShardedEmbeddingTable],
    ) -> List[GroupedEmbeddingConfig]:
        # Partition one rank's tables into groups that can share a TBE.
        grouped_embedding_configs: List[GroupedEmbeddingConfig] = []

        # Bucket by embedding dim only when dim bucketing is warranted
        # (prefetch pipeline + caching); otherwise everything shares one bucket.
        emb_dim_bucketer_policy = (
            EmbDimBucketerPolicy.ALL_BUCKETS
            if should_do_dim_bucketing(embedding_tables)
            else EmbDimBucketerPolicy.SINGLE_BUCKET
        )
        emb_dim_bucketer = EmbDimBucketer(embedding_tables, emb_dim_bucketer_policy)

        # all embedding tables have the same weight status
        is_weighted = (
            embedding_tables[0].is_weighted if len(embedding_tables) > 0 else False
        )

        # Collect groups
        groups = defaultdict(list)
        grouping_keys = []
        for table in embedding_tables:
            group_fused_params = _get_grouping_fused_params(table.fused_params) or {}
            grouping_key = (
                table.data_type,
                table.pooling,
                table.has_feature_processor,
                tuple(sorted(group_fused_params.items())),
                _get_compute_kernel_type(table.compute_kernel),
                emb_dim_bucketer.get_bucket(table.embedding_dim, table.data_type),
            )
            # micromanage the order in which we traverse the groups (first-seen
            # order) to ensure backwards compatibility
            if grouping_key not in groups:
                grouping_keys.append(grouping_key)
            groups[grouping_key].append(table)

        for grouping_key in grouping_keys:
            (
                data_type,
                pooling,
                has_feature_processor,
                fused_params_tuple,
                compute_kernel_type,
                _,
            ) = grouping_key
            grouped_tables = groups[grouping_key]
            # remove non-native fused params
            per_tbe_fused_params = {
                k: v
                for k, v in fused_params_tuple
                if k not in ["_batch_key", USE_ONE_TBE_PER_TABLE]
            }
            cache_load_factor = _get_weighted_avg_cache_load_factor(grouped_tables)
            if cache_load_factor is not None:
                per_tbe_fused_params[CACHE_LOAD_FACTOR_STR] = cache_load_factor

            grouped_embedding_configs.append(
                GroupedEmbeddingConfig(
                    data_type=data_type,
                    pooling=pooling,
                    is_weighted=is_weighted,
                    has_feature_processor=has_feature_processor,
                    compute_kernel=compute_kernel_type,
                    embedding_tables=grouped_tables,
                    fused_params=per_tbe_fused_params,
                )
            )
        return grouped_embedding_configs

    # Weightedness must be uniform across every table on every rank.
    table_weightedness = [
        table.is_weighted for tables in tables_per_rank for table in tables
    ]
    assert all(table_weightedness) or not any(table_weightedness)

    grouped_embedding_configs_by_rank: List[List[GroupedEmbeddingConfig]] = []
    for tables in tables_per_rank:
        grouped_embedding_configs = _group_tables_per_rank(tables)
        grouped_embedding_configs_by_rank.append(grouped_embedding_configs)
    return grouped_embedding_configs_by_rank
9,032 | import abc
import copy
import uuid
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import torch
from torch import distributed as dist, nn
from torchrec.distributed.dist_data import (
KJTAllToAllTensorsAwaitable,
SplitsAllToAllAwaitable,
)
from torchrec.distributed.embedding_dim_bucketer import (
EmbDimBucketer,
EmbDimBucketerPolicy,
should_do_dim_bucketing,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardMetadata,
)
from torchrec.fx.utils import assert_fx_safe
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
# Generic type variable for context objects; constrained to Multistreamable.
C = TypeVar("C", bound=Multistreamable)
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
    self,
    keys: List[str],
    values: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    lengths: Optional[torch.Tensor] = None,
    offsets: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    # Below exposed to ensure torch.script-able
    length_per_key: Optional[List[int]] = None,
    offset_per_key: Optional[List[int]] = None,
    index_per_key: Optional[Dict[str, int]] = None,
    jt_dict: Optional[Dict[str, JaggedTensor]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
    """Initialize the KJT; see the class docstring for field semantics."""
    self._keys: List[str] = keys
    self._values: torch.Tensor = values
    self._weights: Optional[torch.Tensor] = weights
    # lengths/offsets, when provided, must be integer tensors (or empty).
    if offsets is not None:
        _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
    if lengths is not None:
        _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
    self._lengths: Optional[torch.Tensor] = lengths
    self._offsets: Optional[torch.Tensor] = offsets

    self._stride_per_key_per_rank: List[List[int]] = []
    self._stride_per_key: List[int] = []
    self._variable_stride_per_key: bool = False
    self._stride: int = -1

    if stride_per_key_per_rank is not None:
        # Variable-batch path: per-key-per-rank strides are mutually
        # exclusive with a single global `stride`.
        if stride is not None:
            raise ValueError(
                "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
            )
        self._stride_per_key_per_rank = stride_per_key_per_rank
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        self._variable_stride_per_key = True
        if not stride_per_key_per_rank:
            self._stride = 0
        elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
            # All keys share one total stride, so a scalar stride is defined.
            self._stride = self.stride_per_key()[0]
    else:
        # Fixed-batch path: derive stride from lengths/offsets when absent.
        if torch.jit.is_tracing():
            stride = _maybe_compute_stride_kjt_scripted(
                keys, stride, lengths, offsets
            )[0]
        else:
            stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
        self._stride = stride
        self._stride_per_key_per_rank = [[stride]] * len(self._keys)
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]

    # lazy fields
    self._length_per_key: Optional[List[int]] = length_per_key
    self._offset_per_key: Optional[List[int]] = offset_per_key
    self._index_per_key: Optional[Dict[str, int]] = index_per_key
    self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
    self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
        inverse_indices
    )
    self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
    keys: List[str],
    values: torch.Tensor,
    offsets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KJT from offsets and eagerly populate its per-key caches.

    Equivalent to constructing the KJT and calling ``sync()`` on it.
    """
    return KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        offsets=offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    ).sync()
def from_lengths_sync(
    keys: List[str],
    values: torch.Tensor,
    lengths: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KJT from lengths and eagerly populate its per-key caches.

    Equivalent to constructing the KJT and calling ``sync()`` on it.
    """
    return KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        lengths=lengths,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    ).sync()
def concat(
    kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
    """Concatenate multiple KJTs key-wise into a single KJT.

    All inputs must agree on weightedness and on whether they use variable
    stride per key; with fixed stride, all strides must match.

    Raises:
        ValueError: if ``kjt_list`` is empty or weightedness is mixed.
    """
    if len(kjt_list) == 0:
        raise ValueError("Can't concat empty KJT list")

    is_weighted: bool = kjt_list[0].weights_or_none() is not None
    has_length_per_key: bool = True

    length_per_key: List[int] = []
    keys: List[str] = []
    value_list: List[torch.Tensor] = []
    weight_list: List[torch.Tensor] = []
    length_list: List[torch.Tensor] = []
    stride_per_key_per_rank: List[List[int]] = []
    stride: Optional[int] = None
    variable_stride_per_key_list = [
        kjt.variable_stride_per_key() for kjt in kjt_list
    ]
    assert all(variable_stride_per_key_list) or not any(
        variable_stride_per_key_list
    ), "variable stride per key must be consistent for all KJTs"
    variable_stride_per_key = all(variable_stride_per_key_list)

    for kjt in kjt_list:
        curr_is_weighted: bool = kjt.weights_or_none() is not None
        if is_weighted != curr_is_weighted:
            raise ValueError("Can't merge weighted KJT with unweighted KJT")
        _length_per_key: Optional[List[int]] = None
        if kjt._length_per_key is None:
            # if any input lacks cached per-key lengths, the result's cache
            # is skipped entirely (see length_per_key below)
            has_length_per_key = False
        else:
            _length_per_key = kjt._length_per_key
        if has_length_per_key and _length_per_key is not None:
            length_per_key += _length_per_key
        keys += kjt.keys()
        value_list.append(kjt.values())
        if is_weighted:
            weight_list.append(kjt.weights())
        length_list.append(kjt.lengths())
        if variable_stride_per_key:
            stride_per_key_per_rank += kjt.stride_per_key_per_rank()
        elif stride is None:
            stride = kjt.stride()
        else:
            assert stride == kjt.stride(), "strides must be consistent for all KJTs"

    return KeyedJaggedTensor(
        keys=keys,
        values=torch.cat(value_list, dim=0),
        weights=torch.cat(weight_list, dim=0) if is_weighted else None,
        lengths=torch.cat(length_list, dim=0),
        stride=stride,
        stride_per_key_per_rank=(
            stride_per_key_per_rank if variable_stride_per_key else None
        ),
        length_per_key=length_per_key if has_length_per_key else None,
    )
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
    """Return a KJT with no keys, empty values/lengths, and stride 0."""
    if is_weighted:
        weights = torch.empty(0, dtype=weights_dtype, device=device)
    else:
        weights = None
    return KeyedJaggedTensor(
        keys=torch.jit.annotate(List[str], []),
        values=torch.empty(0, dtype=values_dtype, device=device),
        weights=weights,
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        stride=0,
    )
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """Return an empty KJT whose dtypes/device/stride mirror ``kjt``."""
    dev = kjt.device()
    if kjt.variable_stride_per_key():
        stride = None
        stride_per_key_per_rank = kjt.stride_per_key_per_rank()
    else:
        stride = kjt.stride()
        stride_per_key_per_rank = None
    weights = kjt.weights_or_none()
    empty_weights = (
        torch.empty(0, device=dev, dtype=weights.dtype)
        if weights is not None
        else None
    )
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=dev, dtype=kjt.values().dtype),
        weights=empty_weights,
        lengths=torch.empty(0, device=dev, dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.

    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.

    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.

    Consider the visualization of the following KeyedJaggedTensor:
    #              0       1        2  <-- dim_1
    # "Feature0"   [V0,V1] None    [V2]
    # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    #   ^
    #  dim_0

    Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

    Now if the input jt_dict = {
        # "Feature0"   [V0,V1] [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.

    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    stride_per_key: List[int] = []
    for jt in jt_dict.values():
        # each JT's lengths has one entry per batch element, so its length
        # is that key's stride
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # uniform stride across keys -> scalar stride; otherwise per-key strides
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
def sync(self) -> "KeyedJaggedTensor":
    """Eagerly materialize the lazy per-key caches (length/offset per key)."""
    self.length_per_key()
    self.offset_per_key()
    return self

def unsync(self) -> "KeyedJaggedTensor":
    """Drop the cached per-key lengths/offsets so they are recomputed lazily."""
    self._length_per_key = None
    self._offset_per_key = None
    return self

def device(self) -> torch.device:
    """Device of the underlying values tensor."""
    return self._values.device
def lengths(self) -> torch.Tensor:
    """Lengths tensor, computed from offsets if needed and then cached."""
    _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = _lengths
    return _lengths

def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Cached lengths tensor, or None if neither set nor computed yet."""
    return self._lengths

def offsets(self) -> torch.Tensor:
    """Offsets tensor, computed from lengths if needed and then cached."""
    _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = _offsets
    return _offsets

def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Cached offsets tensor, or None if neither set nor computed yet."""
    return self._offsets

def keys(self) -> List[str]:
    """Keys, in order."""
    return self._keys

def values(self) -> torch.Tensor:
    """Flat values tensor covering all keys."""
    return self._values

def weights(self) -> torch.Tensor:
    """Weights tensor; throws (via helper) if this KJT is unweighted."""
    return _get_weights_or_throw(self._weights)

def weights_or_none(self) -> Optional[torch.Tensor]:
    """Weights tensor, or None if unweighted."""
    return self._weights

def stride(self) -> int:
    """Number of examples per batch (see class docstring)."""
    return self._stride

def stride_per_key(self) -> List[int]:
    """Total stride per key (summed over ranks)."""
    return self._stride_per_key

def stride_per_key_per_rank(self) -> List[List[int]]:
    """Per-key, per-rank batch sizes."""
    return self._stride_per_key_per_rank

def variable_stride_per_key(self) -> bool:
    """True if this KJT was built with per-key-per-rank strides."""
    return self._variable_stride_per_key

def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    """Inverse indices; throws (via helper) if not provided."""
    return _get_inverse_indices_or_throw(self._inverse_indices)

def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    """Inverse indices, or None if not provided."""
    return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
    """Mapping key -> position, computed once and cached."""
    _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
        self._keys,
        self._index_per_key,
    )
    self._index_per_key = _index_per_key
    return _index_per_key

def length_per_key(self) -> List[int]:
    """Number of values belonging to each key, computed once and cached."""
    _length_per_key = _maybe_compute_length_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    return _length_per_key

def length_per_key_or_none(self) -> Optional[List[int]]:
    """Cached per-key lengths, or None if not computed yet."""
    return self._length_per_key

def offset_per_key(self) -> List[int]:
    """Start offset of each key within values (plus a final end offset)."""
    _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    self._offset_per_key = _offset_per_key
    return _offset_per_key

def offset_per_key_or_none(self) -> Optional[List[int]]:
    """Cached per-key offsets, or None if not computed yet."""
    return self._offset_per_key

def lengths_offset_per_key(self) -> List[int]:
    """Cumulative sum of stride_per_key: index boundaries into lengths()."""
    if not self._lengths_offset_per_key:
        self._lengths_offset_per_key = _cumsum(self.stride_per_key())
    return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    """Split key-wise into consecutive KJTs of ``segments[i]`` keys each.

    A segment covering all keys is returned without slicing any tensors;
    an empty segment yields a KJT with empty tensors on the same device.
    """
    split_list: List[KeyedJaggedTensor] = []
    start = 0
    start_offset = 0
    _length_per_key = self.length_per_key()
    _offset_per_key = self.offset_per_key()
    for segment in segments:
        end = start + segment
        end_offset = _offset_per_key[end]
        keys: List[str] = self._keys[start:end]
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank()[start:end])
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        if segment == len(self._keys):
            # no torch slicing required
            split_list.append(
                KeyedJaggedTensor(
                    keys=self._keys,
                    values=self._values,
                    weights=self.weights_or_none(),
                    lengths=self._lengths,
                    offsets=self._offsets,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=self._length_per_key,
                    offset_per_key=self._offset_per_key,
                    index_per_key=self._index_per_key,
                    jt_dict=self._jt_dict,
                    inverse_indices=None,
                )
            )
        elif segment == 0:
            # empty segment: construct empty tensors of matching dtypes
            empty_int_list: List[int] = torch.jit.annotate(List[int], [])
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=torch.tensor(
                        empty_int_list,
                        device=self.device(),
                        dtype=self._values.dtype,
                    ),
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self.weights().dtype,
                        )
                    ),
                    lengths=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    offsets=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=None,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        else:
            # general case: slice values/weights by offsets, lengths by
            # per-key length boundaries
            split_length_per_key = _length_per_key[start:end]

            if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                # Checks for dynamo dynamic shapes tracing
                torch._check_is_size(start_offset)
                torch._check_is_size(end_offset)
                torch._check_is_size(end_offset - start_offset)
                torch._check(start_offset <= self._values.size(0))
                torch._check(end_offset <= self._values.size(0))
                torch._check(end_offset >= start_offset)

            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=self._values[start_offset:end_offset],
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else self.weights()[start_offset:end_offset]
                    ),
                    lengths=self.lengths()[
                        self.lengths_offset_per_key()[
                            start
                        ] : self.lengths_offset_per_key()[end]
                    ],
                    offsets=None,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=split_length_per_key,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        start = end
        start_offset = end_offset
    return split_list
def permute(
    self,
    indices: List[int],
    indices_tensor: Optional[torch.Tensor] = None,
    include_inverse_indices: bool = False,
) -> "KeyedJaggedTensor":
    """Return a new KJT with keys (and their data) reordered by ``indices``.

    Args:
        indices: permutation of key positions.
        indices_tensor: optional tensor form of ``indices``; built on this
            KJT's device if not supplied.
        include_inverse_indices: carry inverse indices over to the result.
    """
    if indices_tensor is None:
        indices_tensor = torch.tensor(
            indices, dtype=torch.int, device=self.device()
        )

    length_per_key = self.length_per_key()
    permuted_keys: List[str] = []
    permuted_stride_per_key_per_rank: List[List[int]] = []
    permuted_length_per_key: List[int] = []
    permuted_lengths_sum = 0
    for index in indices:
        key = self.keys()[index]
        permuted_keys.append(key)
        permuted_stride_per_key_per_rank.append(
            self.stride_per_key_per_rank()[index]
        )
        permuted_length_per_key.append(length_per_key[index])
        permuted_lengths_sum += length_per_key[index]

    if self.variable_stride_per_key():
        # variable-batch path: segment-wise permutation of lengths and values
        length_per_key_tensor = _pin_and_move(
            torch.tensor(self.length_per_key()), self.device()
        )
        stride_per_key_tensor = _pin_and_move(
            torch.tensor(self.stride_per_key()), self.device()
        )
        permuted_lengths, _ = _permute_tensor_by_segments(
            self.lengths(),
            stride_per_key_tensor,
            indices_tensor,
            None,
        )
        permuted_values, permuted_weights = _permute_tensor_by_segments(
            self.values(),
            length_per_key_tensor,
            indices_tensor,
            self.weights_or_none(),
        )
    else:
        # fixed-batch path: fused fbgemm kernel permutes lengths/values/weights
        (
            permuted_lengths,
            permuted_values,
            permuted_weights,
        ) = torch.ops.fbgemm.permute_2D_sparse_data(
            indices_tensor,
            self.lengths().view(len(self._keys), -1),
            self.values(),
            self.weights_or_none(),
            permuted_lengths_sum,
        )
    stride, optional_permuted_stride_per_key_per_rank = (
        (None, permuted_stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    kjt = KeyedJaggedTensor(
        keys=permuted_keys,
        values=permuted_values,
        weights=permuted_weights,
        lengths=permuted_lengths.view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
        length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=(
            self.inverse_indices_or_none() if include_inverse_indices else None
        ),
    )
    return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
    """Return a copy of this KJT whose lengths tensor is flattened to 1-D."""
    if self.variable_stride_per_key():
        stride = None
        stride_per_key_per_rank = self.stride_per_key_per_rank()
    else:
        stride = self._stride
        stride_per_key_per_rank = None
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values,
        weights=self._weights,
        lengths=self.lengths().view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self.length_per_key(),
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=None,
    )
def __getitem__(self, key: str) -> JaggedTensor:
    """Return the JaggedTensor for ``key`` (slices of values/weights/lengths)."""
    offset_per_key = self.offset_per_key()
    index = self._key_indices()[key]
    start_offset = offset_per_key[index]
    # out-of-range end falls back to an empty value slice
    end_offset = (
        offset_per_key[index + 1]
        if index + 1 < len(offset_per_key)
        else start_offset
    )
    return JaggedTensor(
        values=self._values[start_offset:end_offset],
        weights=(
            None
            if self.weights_or_none() is None
            else self.weights()[start_offset:end_offset]
        ),
        lengths=self.lengths()[
            self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                index + 1
            ]
        ],
        offsets=None,
    )
def to_dict(self) -> Dict[str, JaggedTensor]:
    """Convert to a key -> JaggedTensor dict; the result is cached on self."""
    _jt_dict = _maybe_compute_kjt_to_jt_dict(
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        keys=self.keys(),
        length_per_key=self.length_per_key(),
        lengths=self.lengths(),
        values=self.values(),
        variable_stride_per_key=self.variable_stride_per_key(),
        weights=self.weights_or_none(),
        jt_dict=self._jt_dict,
    )
    self._jt_dict = _jt_dict
    return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Mark every underlying tensor as in-use on ``stream`` (see
    ``torch.Tensor.record_stream``); optional tensors are skipped."""
    self._values.record_stream(stream)
    w = self._weights
    if w is not None:
        w.record_stream(stream)
    lens = self._lengths
    if lens is not None:
        lens.record_stream(stream)
    offs = self._offsets
    if offs is not None:
        offs.record_stream(stream)
def to(
    self,
    device: torch.device,
    non_blocking: bool = False,
    dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
    """Return a copy with tensors moved to ``device``.

    ``dtype``, if given, is applied to the weights only. Cached per-key
    metadata is carried over unchanged.
    """
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    length_per_key = self._length_per_key
    offset_per_key = self._offset_per_key
    index_per_key = self._index_per_key
    # NOTE(review): the cached _jt_dict is carried over without moving its
    # tensors to `device` — confirm downstream users recompute it.
    jt_dict = self._jt_dict
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (
            inverse_indices[0],
            inverse_indices[1].to(device, non_blocking=non_blocking),
        )
    if weights is not None:
        if dtype is not None:
            weights = weights.to(
                dtype=dtype, device=device, non_blocking=non_blocking
            )
        else:
            weights = weights.to(device=device, non_blocking=non_blocking)

    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.to(device, non_blocking=non_blocking),
        weights=weights,
        lengths=(
            lengths.to(device, non_blocking=non_blocking)
            if lengths is not None
            else None
        ),
        offsets=(
            offsets.to(device, non_blocking=non_blocking)
            if offsets is not None
            else None
        ),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=length_per_key,
        offset_per_key=offset_per_key,
        index_per_key=index_per_key,
        jt_dict=jt_dict,
        inverse_indices=inverse_indices,
    )
def __str__(self) -> str:
    """Human-readable rendering: one JaggedTensor line per key."""
    # no keys or no length/offset information -> nothing to render
    if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
        return "KeyedJaggedTensor()\n"
    offsets = self.offsets()

    return (
        "KeyedJaggedTensor({\n"
        + ",\n".join(
            [
                " "
                + _jagged_tensor_string(
                    self._keys[index],
                    self._values,
                    self._weights,
                    offsets,
                    sum(self.stride_per_key()[:index]),
                    sum(self.stride_per_key()[: index + 1]),
                )
                for index in range(len(self._keys))
            ]
        )
        + "\n})\n"
    )
def pin_memory(self) -> "KeyedJaggedTensor":
    """Return a copy whose tensors are in pinned (page-locked) host memory."""
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())

    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.pin_memory(),
        weights=weights.pin_memory() if weights is not None else None,
        lengths=lengths.pin_memory() if lengths is not None else None,
        offsets=offsets.pin_memory() if offsets is not None else None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        # the cached _jt_dict is not propagated to the pinned copy
        jt_dict=None,
        inverse_indices=inverse_indices,
    )
def dist_labels(self) -> List[str]:
    """Names of the tensor groups exchanged in KJT all-to-all, in order."""
    has_strides = self.variable_stride_per_key()
    has_weights = self.weights_or_none() is not None
    labels = ["lengths", "values"]
    if has_strides:
        labels = labels + ["strides"]
    if has_weights:
        labels = labels + ["weights"]
    return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    """Per-group all-to-all split sizes, ordered like ``dist_labels``."""
    strides_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
    lengths_per_split = _sum_by_splits(self.length_per_key(), key_splits)
    out = [strides_per_split, lengths_per_split]
    if self.variable_stride_per_key():
        out.append(key_splits)
    if self.weights_or_none() is not None:
        # weights are split exactly like values
        out.append(lengths_per_split)
    return out
def dist_tensors(self) -> List[torch.Tensor]:
    """Tensors to exchange in KJT all-to-all, ordered like ``dist_labels``."""
    out = [self.lengths(), self.values()]
    if self.variable_stride_per_key():
        out.append(
            _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
        )
    if self.weights_or_none() is not None:
        out.append(self.weights())
    return out
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """
        Rebuild a KJT from the raw tensors received in a KJT all-to-all.

        ``tensors`` is ordered [lengths, values, (strides,) (weights,)]; the
        optional entries depend on ``variable_stride_per_key`` and on whether
        the KJT is weighted (mirrors ``dist_labels``/``dist_tensors``).
        ``recat``, when non-empty, permutes the received data back into
        key-major order.  The returned KJT is ``sync()``-ed.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        # Present only in the variable-batch-size-per-key path.
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # Weights, when present, are always the last tensor.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Received strides are rank-major; transpose to [key][rank].
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            # Number of values per (rank, key) segment, derived from the lengths.
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    # Permute lengths and values (plus weights) from rank-major
                    # back to key-major order.
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo the staggered rank layout used during the exchange.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    # Uniform batch size per rank permits the faster 2-D permute.
                    if all(s == stride for s in stride_per_rank):
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def _set_sharding_context_post_a2a(
    kjts: List[KeyedJaggedTensor],
    ctx: C,
) -> None:
    """Record post-A2A per-feature batch sizes on each sharding context.

    For every (kjt, sharding_context) pair, transposes the KJT's
    ``stride_per_key_per_rank`` ([feature][rank]) into the context's
    ``batch_size_per_rank_per_feature`` ([rank][feature]).
    """
    for kjt, sharding_context in zip(kjts, getattr(ctx, "sharding_contexts", [])):
        if not hasattr(sharding_context, "batch_size_per_rank_per_feature"):
            continue
        if not kjt.variable_stride_per_key():
            continue
        stride_per_key_per_rank = kjt.stride_per_key_per_rank()
        if not stride_per_key_per_rank:
            continue
        num_features = len(stride_per_key_per_rank)
        num_ranks = len(stride_per_key_per_rank[0])
        sharding_context.batch_size_per_rank_per_feature = [
            [stride_per_key_per_rank[f][r] for f in range(num_features)]
            for r in range(num_ranks)
        ]
9,033 | import abc
import copy
import uuid
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import torch
from torch import distributed as dist, nn
from torchrec.distributed.dist_data import (
KJTAllToAllTensorsAwaitable,
SplitsAllToAllAwaitable,
)
from torchrec.distributed.embedding_dim_bucketer import (
EmbDimBucketer,
EmbDimBucketerPolicy,
should_do_dim_bucketing,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardMetadata,
)
from torchrec.fx.utils import assert_fx_safe
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
C = TypeVar("C", bound=Multistreamable)  # sharding-context type threaded through helpers below
class KJTAllToAllTensorsAwaitable(Awaitable[KeyedJaggedTensor]):
    """
    Awaitable for KJT tensors AlltoAll.
    Args:
        pg (dist.ProcessGroup): ProcessGroup for AlltoAll communication.
        input (KeyedJaggedTensor): input KJT.
        splits (List[int]): list of len(pg.size()) which indicates how many features to
            send to each pg.rank(). It is assumed the `KeyedJaggedTensor` is ordered by
            destination rank. Same for all ranks.
        input_splits (List[List[int]]): input splits (number of values each rank will
            get) for each tensor in AlltoAll.
        output_splits (List[List[int]]): output splits (number of values per rank in
            output) for each tensor in AlltoAll.
        input_tensors (List[torch.Tensor]): provided KJT tensors (ie. lengths, values)
            to redistribute according to splits.
        labels (List[str]): labels for each provided tensor.
        keys (List[str]): KJT keys after AlltoAll.
        device (torch.device): device on which buffers will be allocated.
        stagger (int): stagger value to apply to recat tensor.
        stride_per_rank (Optional[List[int]]): stride per rank in the non variable
            batch per feature case.
    """
    def __init__(
        self,
        pg: dist.ProcessGroup,
        input: KeyedJaggedTensor,
        splits: List[int],
        input_splits: List[List[int]],
        output_splits: List[List[int]],
        input_tensors: List[torch.Tensor],
        labels: List[str],
        keys: List[str],
        device: torch.device,
        stagger: int,
        stride_per_rank: Optional[List[int]],
    ) -> None:
        super().__init__()
        self._workers: int = pg.size()
        self._pg: dist.ProcessGroup = pg
        self._device: torch.device = device
        self._input = input
        self._splits = splits
        self._input_splits: Dict[str, List[int]] = dict(zip(labels, input_splits))
        self._output_splits: Dict[str, List[int]] = dict(zip(labels, output_splits))
        self._keys = keys
        self._stagger = stagger
        self._stride_per_rank = stride_per_rank
        # Permutation used later (in dist_init) to restore key-major order.
        self._recat: Optional[torch.Tensor] = _get_recat(
            local_split=splits[pg.rank()],
            num_splits=len(splits),
            stagger=stagger,
            device=device,
            batch_size_per_rank=self._stride_per_rank,
        )
        # Single-worker case: no communication needed; _wait_impl returns input.
        if self._workers == 1:
            return
        self._output_tensors: List[torch.Tensor] = []
        self._awaitables: List[dist.Work] = []
        # Launch one async all-to-all per tensor (lengths, values, ...).
        for input_split, output_split, input_tensor, label in zip(
            input_splits,
            output_splits,
            input_tensors,
            labels,
        ):
            output_tensor = torch.empty(
                sum(output_split), device=self._device, dtype=input_tensor.dtype
            )
            with record_function(f"## all2all_data:kjt {label} ##"):
                awaitable = dist.all_to_all_single(
                    output=output_tensor,
                    input=input_tensor,
                    output_split_sizes=output_split,
                    input_split_sizes=input_split,
                    group=self._pg,
                    # Compiled (dynamo) path runs the collective synchronously.
                    async_op=not is_torchdynamo_compiling(),
                )
            self._output_tensors.append(output_tensor)
            self._awaitables.append(awaitable)
    def _wait_impl(self) -> KeyedJaggedTensor:
        """
        Overwrites wait function as we don't handle callbacks here.
        Returns:
            KeyedJaggedTensor: Synced KJT after AlltoAll.
        """
        if self._workers == 1:
            self._input.sync()
            return self._input
        for awaitable in self._awaitables:
            # handling sync torch dynamo trace case where awaitable will be a Tensor
            if isinstance(awaitable, dist.Work):
                awaitable.wait()
        # Reassemble the received tensors into a KJT (applies self._recat).
        return type(self._input).dist_init(
            keys=self._keys,
            tensors=self._output_tensors,
            variable_stride_per_key=self._input.variable_stride_per_key(),
            num_workers=self._workers,
            recat=self._recat,
            stride_per_rank=self._stride_per_rank,
            stagger=self._stagger,
        )
class Awaitable(abc.ABC, Generic[W]):
    """Lazy handle for the result of an asynchronous operation.

    ``wait()`` blocks until the result is available, then applies any
    registered callbacks (in registration order) to the value before
    returning it.
    """

    def __init__(self) -> None:
        # Callbacks applied, in order, to the awaited value.
        self._callbacks: List[Callable[[W], W]] = []

    def _wait_impl(self) -> W:
        """Produce the awaited value; subclasses override this."""
        pass

    def wait(self) -> W:
        """Block until the result is ready and run registered callbacks on it."""
        with record_function(f"## {self.__class__.__name__} wait() ##"):
            ret: W = self._wait_impl()
            # `callbacks` must be a property for this iteration to work:
            # iterating a bound method would raise TypeError.
            for callback in self.callbacks:
                ret = callback(ret)
            return ret

    @property
    def callbacks(self) -> List[Callable[[W], W]]:
        return self._callbacks
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """Represents an (optionally weighted) keyed jagged tensor.
    A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
    slices may be of different lengths. Keyed on first dimension and jagged on the last
    dimension.
    Implementation is torch.jit.script-able.
    Args:
        keys (List[str]): keys to the jagged Tensor.
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
            same shape as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
        stride (Optional[int]): number of examples per batch.
        stride_per_key_per_rank (Optional[List[List[int]]]): batch size
            (number of examples) per key per rank, with the outer list representing the
            keys and the inner list representing the values.
            Each value in the inner list represents the number of examples in the batch
            from the rank of its index in a distributed context.
        length_per_key (Optional[List[int]]): start length for each key.
        offset_per_key (Optional[List[int]]): start offset for each key and final
            offset.
        index_per_key (Optional[Dict[str, int]]): index for each key.
        jt_dict (Optional[Dict[str, JaggedTensor]]):
        inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
            expand deduplicated embedding output for variable stride per key.
    Example::
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
        dim_1: optional second dimension (ie. batch size)
        dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
        # We represent this data with following inputs:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    """
    # This is the subset of fields on KJT which are required (all other fields
    # can be derived from these fields, and are only cached).
    # Either of _lengths/_offsets may be None; each is computed lazily from the
    # other (see lengths()/offsets()).
    _fields = [
        "_values",
        "_weights",
        "_lengths",
        "_offsets",
    ]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer (or empty) tensors.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-batch-size-per-key mode; mutually exclusive with `stride`.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys share one batch size, so a scalar stride is well defined;
                # otherwise _stride stays at the -1 sentinel.
                self._stride = self.stride_per_key()[0]
        else:
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """
        Concatenate KJTs along the key dimension.

        All inputs must agree on weightedness, on whether they use variable
        stride per key, and (in the fixed-stride case) on the stride itself.
        `length_per_key` is carried over only if every input has it cached.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        # Either every input is variable-stride or none is.
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                # One missing cache disables length_per_key for the result.
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.
        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.
        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.
        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.
        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            # Each JT's batch size is inferred from its lengths tensor.
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # Uniform batch size -> scalar stride; otherwise variable stride per key.
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly materialize the lazy per-key length/offset caches."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the lazy per-key caches; they are recomputed on next use."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        # Device is defined by the values tensor.
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Jagged-slice lengths; computed from offsets (and cached) if absent."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Cumulative offsets; computed from lengths (and cached) if absent."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        return self._offsets
    def keys(self) -> List[str]:
        return self._keys
    def values(self) -> torch.Tensor:
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; raises if this KJT is unweighted."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        return self._weights
    def stride(self) -> int:
        """Number of examples per batch (see __init__ for how it is derived)."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (summed over ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Batch size per key per rank; outer list is keys, inner list is ranks."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """True when this KJT was built with `stride_per_key_per_rank`."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices for deduplicated inputs; raises if not set."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Mapping key -> position in keys(); computed once and cached."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values belonging to each key; computed lazily and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset into values for each key plus a final end offset; cached."""
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        # Both caches are refreshed since the helper derives them together.
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative sum of stride_per_key(); indexes into the lengths tensor."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """
        Split this KJT along the key dimension into len(segments) KJTs, where
        segments[i] is the number of keys in the i-th output.  Views into the
        underlying tensors are returned where possible (no data copy).
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required: the segment covers all keys, so
                # the tensors and caches can be shared wholesale.
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Empty segment: build a KJT with zero keys and empty tensors.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """
        Return a new KJT whose keys (and their data) are reordered per
        ``indices``.  ``indices_tensor`` may be supplied to avoid creating
        the index tensor on device here.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable batch sizes: permute by per-key segments.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed batch size: lengths form a dense [num_keys, stride] grid.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
def __getitem__(self, key: str) -> JaggedTensor:
offset_per_key = self.offset_per_key()
index = self._key_indices()[key]
start_offset = offset_per_key[index]
end_offset = (
offset_per_key[index + 1]
if index + 1 < len(offset_per_key)
else start_offset
)
return JaggedTensor(
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
index + 1
]
],
offsets=None,
)
def to_dict(self) -> Dict[str, JaggedTensor]:
_jt_dict = _maybe_compute_kjt_to_jt_dict(
stride=self.stride(),
stride_per_key=self.stride_per_key(),
keys=self.keys(),
length_per_key=self.length_per_key(),
lengths=self.lengths(),
values=self.values(),
variable_stride_per_key=self.variable_stride_per_key(),
weights=self.weights_or_none(),
jt_dict=self._jt_dict,
)
self._jt_dict = _jt_dict
return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """
        Return a copy of this KJT on ``device``.  ``dtype``, if given, is
        applied to the weights tensor only; cached per-key metadata is
        carried over unchanged.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # Only the indices tensor moves; the key list is device-agnostic.
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key via _jagged_tensor_string; empty KJTs print as 'KeyedJaggedTensor()'."""
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    " "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        # Each key's slice of the lengths/offsets space.
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """Return a copy of this KJT whose tensors are pinned in host memory."""
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # Only the indices tensor is pinned; the key list is reused as-is.
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Per-component all2all split sizes, ordered to match ``dist_labels()``."""
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        extras: List[List[int]] = []
        if self.variable_stride_per_key():
            extras.append(key_splits)
        if self.weights_or_none() is not None:
            # Weights are element-aligned with values, so they reuse the value splits.
            extras.append(length_per_split)
        return [batch_size_per_split, length_per_split] + extras
def dist_tensors(self) -> List[torch.Tensor]:
        """Tensor components to exchange during dist comms, ordered as ``dist_labels()``."""
        out: List[torch.Tensor] = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            # Strides travel as a pinned tensor moved to this KJT's device.
            out.append(_pin_and_move(torch.tensor(self.stride_per_key()), self.device()))
        if self.weights_or_none() is not None:
            out.append(self.weights())
        return out
def dist_init(
    keys: List[str],
    tensors: List[torch.Tensor],
    variable_stride_per_key: bool,
    num_workers: int,
    recat: Optional[torch.Tensor],
    stride_per_rank: Optional[List[int]],
    stagger: int = 1,
) -> "KeyedJaggedTensor":
        """Build a KJT on each rank from the tensors received in a KJT all2all.

        ``tensors`` is ordered as ``dist_labels()``: [lengths, values, (strides,)
        (weights,)] -- weights are present iff len(tensors) is 4 in
        variable-stride mode or 3 otherwise. When ``recat`` is non-empty, the
        received rank-major data is permuted back to key-major layout. The
        returned KJT is ``sync()``'d so its length/offset caches are populated.

        NOTE(review): defined without ``self`` -- presumably a @staticmethod
        whose decorator is outside this chunk; confirm at the definition site.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # weights, when sent, are always the last tensor.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Strides arrive rank-major; transpose to a per-key list of
            # per-rank strides.
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            # Prefix sums of strides and of lengths let us slice each
            # (rank, key) segment's total element count out of flat `lengths`.
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    # Permute lengths (segmented by stride) and values/weights
                    # (segmented by per-key length) from rank-major to key-major.
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo staggered rank ordering: interleave each key's per-rank
                # strides back into plain rank order.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    # Uniform batch size per rank enables the cheaper 2D permute;
                    # otherwise fall back to the 1D variant.
                    if all(s == stride for s in stride_per_rank):
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def _set_sharding_context_intra_a2a(
    tensors_awaitables: List[Awaitable[KeyedJaggedTensor]],
    ctx: C,
) -> None:
    """Copy per-awaitable all2all metadata (splits/recat/batch sizes) onto ctx's sharding contexts."""
    contexts = getattr(ctx, "sharding_contexts", [])
    for awaitable, sharding_context in zip(tensors_awaitables, contexts):
        if not isinstance(awaitable, KJTAllToAllTensorsAwaitable):
            continue
        # Only set the attributes the context type actually declares.
        if hasattr(sharding_context, "input_splits"):
            sharding_context.input_splits = awaitable._input_splits["values"]
        if hasattr(sharding_context, "output_splits"):
            sharding_context.output_splits = awaitable._output_splits["values"]
        if hasattr(sharding_context, "sparse_features_recat"):
            sharding_context.sparse_features_recat = awaitable._recat
        if (
            hasattr(sharding_context, "batch_size_per_rank")
            and awaitable._stride_per_rank is not None
        ):
            sharding_context.batch_size_per_rank = awaitable._stride_per_rank
9,034 | import abc
import copy
import uuid
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Generic, List, Optional, Tuple, TypeVar, Union
import torch
from torch import distributed as dist, nn
from torchrec.distributed.dist_data import (
KJTAllToAllTensorsAwaitable,
SplitsAllToAllAwaitable,
)
from torchrec.distributed.embedding_dim_bucketer import (
EmbDimBucketer,
EmbDimBucketerPolicy,
should_do_dim_bucketing,
)
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
FeatureShardingMixIn,
GroupedEmbeddingConfig,
KJTList,
ListOfKJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
ParameterSharding,
QuantizedCommCodecs,
ShardMetadata,
)
from torchrec.fx.utils import assert_fx_safe
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
T = TypeVar("T")
T = TypeVar("T")
def _split(flat_list: List[T], splits: List[int]) -> List[List[T]]:
    """Partition ``flat_list`` into consecutive chunks of the sizes given in ``splits``.

    Args:
        flat_list: sequence to partition.
        splits: chunk sizes; chunk ``i`` has length ``splits[i]`` (short final
            chunks are possible if ``flat_list`` runs out).

    Returns:
        List of slices; elements beyond ``sum(splits)`` are not included.
    """
    # Keep a running offset instead of recomputing sum(splits[:i]) for every
    # chunk, which made the original quadratic in len(splits).
    chunks: List[List[T]] = []
    offset = 0
    for n in splits:
        chunks.append(flat_list[offset : offset + n])
        offset += n
    return chunks
9,035 | import logging
from abc import ABC
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torch.autograd.function import FunctionCtx
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
BatchedFusedEmbedding,
BatchedFusedEmbeddingBag,
)
from torchrec.distributed.comm_ops import get_gradient_division
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
)
from torchrec.distributed.fused_params import (
get_tbes_to_register_from_iterable,
TBEToRegisterMixIn,
)
from torchrec.distributed.quant_embedding_kernel import (
QuantBatchedEmbedding,
QuantBatchedEmbeddingBag,
)
from torchrec.distributed.types import ShardedTensor
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def fx_wrap_tensor_view2d(x: torch.Tensor, dim0: int, dim1: int) -> torch.Tensor:
    """Reshape ``x`` to a 2-D view of shape ``(dim0, dim1)`` without copying data."""
    reshaped = x.view(dim0, dim1)
    return reshaped
9,036 | import logging
from abc import ABC
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torch.autograd.function import FunctionCtx
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
BatchedFusedEmbedding,
BatchedFusedEmbeddingBag,
)
from torchrec.distributed.comm_ops import get_gradient_division
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
)
from torchrec.distributed.fused_params import (
get_tbes_to_register_from_iterable,
TBEToRegisterMixIn,
)
from torchrec.distributed.quant_embedding_kernel import (
QuantBatchedEmbedding,
QuantBatchedEmbeddingBag,
)
from torchrec.distributed.types import ShardedTensor
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def _load_state_dict(
    emb_modules: "nn.ModuleList",
    state_dict: "OrderedDict[str, Union[torch.Tensor, ShardedTensor]]",
) -> Tuple[List[str], List[str]]:
    """In-place copy of ``state_dict`` entries into the embedding kernel modules.

    For every parameter of every module in ``emb_modules``, copies the matching
    ``state_dict`` entry into the destination tensor (shard by shard for
    ``ShardedTensor``s, after checking shard metadata lines up).

    Returns:
        ``(missing_keys, unexpected_keys)`` -- keys the modules expected but the
        dict lacked, and dict keys no module consumed, mirroring
        ``nn.Module.load_state_dict``'s convention.
    """
    missing_keys = []
    # Start with every provided key marked "unexpected" and remove as consumed.
    # NOTE(review): if two modules exposed the same key, the second remove()
    # would raise ValueError -- presumably keys are unique across modules; verify.
    unexpected_keys = list(state_dict.keys())
    for emb_module in emb_modules:
        for key, dst_param in emb_module.state_dict().items():
            if key in state_dict:
                src_param = state_dict[key]
                if isinstance(dst_param, ShardedTensor):
                    assert isinstance(src_param, ShardedTensor)
                    assert len(dst_param.local_shards()) == len(
                        src_param.local_shards()
                    )
                    # Shards must correspond one-to-one with identical
                    # offsets/sizes before the per-shard copy.
                    for dst_local_shard, src_local_shard in zip(
                        dst_param.local_shards(), src_param.local_shards()
                    ):
                        assert (
                            dst_local_shard.metadata.shard_offsets
                            == src_local_shard.metadata.shard_offsets
                        )
                        assert (
                            dst_local_shard.metadata.shard_sizes
                            == src_local_shard.metadata.shard_sizes
                        )
                        dst_local_shard.tensor.detach().copy_(src_local_shard.tensor)
                else:
                    assert isinstance(src_param, torch.Tensor) and isinstance(
                        dst_param, torch.Tensor
                    )
                    # detach() keeps the in-place copy out of autograd history.
                    dst_param.detach().copy_(src_param)
                unexpected_keys.remove(key)
            else:
                missing_keys.append(cast(str, key))
    return missing_keys, unexpected_keys
9,037 | import logging
from abc import ABC
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torch.autograd.function import FunctionCtx
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
BatchedFusedEmbedding,
BatchedFusedEmbeddingBag,
)
from torchrec.distributed.comm_ops import get_gradient_division
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
)
from torchrec.distributed.fused_params import (
get_tbes_to_register_from_iterable,
TBEToRegisterMixIn,
)
from torchrec.distributed.quant_embedding_kernel import (
QuantBatchedEmbedding,
QuantBatchedEmbeddingBag,
)
from torchrec.distributed.types import ShardedTensor
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def embeddings_cat_empty_rank_handle(
    embeddings: List[torch.Tensor],
    dummy_embs_tensor: torch.Tensor,
    dim: int = 0,
) -> torch.Tensor:
    """Concatenate lookup results along ``dim``, tolerating ranks that own no tables.

    Returns ``dummy_embs_tensor`` when ``embeddings`` is empty (a hack for
    empty ranks), the lone tensor unchanged for a singleton list, and
    ``torch.cat`` of all tensors otherwise.
    """
    count = len(embeddings)
    if count == 0:
        return dummy_embs_tensor
    if count == 1:
        return embeddings[0]
    return torch.cat(embeddings, dim=dim)
9,038 | import logging
from abc import ABC
from collections import OrderedDict
from typing import Any, cast, Dict, Iterator, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn
from torch.autograd.function import FunctionCtx
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
BatchedFusedEmbedding,
BatchedFusedEmbeddingBag,
)
from torchrec.distributed.comm_ops import get_gradient_division
from torchrec.distributed.composable.table_batched_embedding_slice import (
TableBatchedEmbeddingSlice,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
BaseEmbeddingLookup,
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
)
from torchrec.distributed.fused_params import (
get_tbes_to_register_from_iterable,
TBEToRegisterMixIn,
)
from torchrec.distributed.quant_embedding_kernel import (
QuantBatchedEmbedding,
QuantBatchedEmbeddingBag,
)
from torchrec.distributed.types import ShardedTensor
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def embeddings_cat_empty_rank_handle_inference(
    embeddings: List[torch.Tensor],
    dim: int = 0,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    """Inference-path concat along ``dim`` that tolerates an empty embeddings list.

    An empty list yields a fresh empty tensor with the requested ``device`` and
    ``dtype`` (covers empty grouped_configs); a singleton list is returned
    as-is; otherwise all tensors are ``torch.cat``'d.
    """
    if not embeddings:
        return torch.empty([0], dtype=dtype, device=device)
    if len(embeddings) == 1:
        return embeddings[0]
    return torch.cat(embeddings, dim=dim)
9,039 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
import torch
In = TypeVar("In", bound=Pipelineable)
class Pipelineable(Multistreamable):
    """
    Interface for inputs that a train pipeline can move across devices.

    This interface contains two methods: one for moving an input across devices
    (``to``), the other one (inherited from ``Multistreamable``) for marking
    streams that operate the input.
    torch.Tensor implements this interface and we can use it in many applications.
    Another example is torchrec.(Keyed)JaggedTensor, which we use as the input to
    torchrec.EmbeddingBagCollection, which in turn is often the first layer of many models.
    Some models take compound inputs, which should implement this interface.
    """
    def to(self, device: torch.device, non_blocking: bool) -> "Pipelineable":
        """
        Move this object to ``device``, optionally without blocking the host.

        Please be aware that according to https://pytorch.org/docs/stable/generated/torch.Tensor.to.html,
        `to` might return self or a copy of self. So please remember to use `to` with the assignment operator,
        for example, `in = in.to(new_device)`.
        """
        ...
def _to_device(batch: In, device: torch.device, non_blocking: bool) -> In:
    """Move ``batch`` to ``device`` via its Pipelineable ``to`` and return the result."""
    # Pipelineable.to may return self or a copy -- callers must use the return value.
    assert isinstance(
        batch, (torch.Tensor, Pipelineable)
    ), f"{type(batch)} must implement Pipelineable interface"
    moved = batch.to(device=device, non_blocking=non_blocking)
    return cast(In, moved)
9,040 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
import torch
In = TypeVar("In", bound=Pipelineable)
class Multistreamable(abc.ABC):
    """
    Objects implementing this interface are allowed to be transferred
    from one CUDA stream to another.
    torch.Tensor and (Keyed)JaggedTensor implement this interface.
    """
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """
        Mark this object as in use by ``stream`` so the CUDA caching allocator
        does not reclaim its memory while that stream may still read it.
        See https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html
        """
        ...
The provided code snippet includes necessary dependencies for implementing the `_wait_for_batch` function. Write a Python function `def _wait_for_batch(batch: In, stream: Optional[torch.cuda.streams.Stream]) -> None` to solve the following problem:
As mentioned in https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html, PyTorch uses the "caching allocator" for memory allocation for tensors. When a tensor is freed, its memory is likely to be reused by newly constructed tensors. By default, this allocator traces whether a tensor is still in use by only the CUDA stream where it was created. When a tensor is used by additional CUDA streams, we need to call `record_stream` to tell the allocator about these streams. Otherwise, the allocator might free the underlying memory of the tensor once it is no longer used by the creator stream. This is a notable programming trick when we write programs using multiple CUDA streams.
Here is the function:
def _wait_for_batch(batch: In, stream: Optional[torch.cuda.streams.Stream]) -> None:
    """Make the current CUDA stream wait on ``stream`` and record it on ``batch``.

    No-op when ``stream`` is None (e.g. CPU-only execution).

    As mentioned in
    https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html, PyTorch
    uses the "caching allocator" for memory allocation for tensors. When a tensor is
    freed, its memory is likely to be reused by newly constructed tensors. By default,
    this allocator traces whether a tensor is still in use by only the CUDA stream where
    it was created. When a tensor is used by additional CUDA streams, we need to call
    `record_stream` to tell the allocator about these streams. Otherwise, the allocator
    might free the underlying memory of the tensor once it is no longer used by the
    creator stream. This is a notable programming trick when we write programs using
    multiple CUDA streams.
    """
    # Fix: the explanation above used to sit mid-function as a bare string
    # expression (a no-op statement, not a docstring); it now lives in the real
    # docstring. Also fetch the current stream once -- it is both the waiter
    # and the stream recorded on the batch.
    if stream is None:
        return
    cur_stream = torch.cuda.current_stream()
    cur_stream.wait_stream(stream)
    assert isinstance(
        batch, (torch.Tensor, Multistreamable)
    ), f"{type(batch)} must implement Multistreamable interface"
    batch.record_stream(cur_stream)
9,041 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
import torch
In = TypeVar("In", bound=Pipelineable)
class TrainPipelineContext:
class PipelinedForward(BaseForward):
def __call__(self, *input, **kwargs) -> Awaitable:
class PrefetchPipelinedForward(BaseForward):
def __init__(
self,
name: str,
args: List[ArgInfo],
module: ShardedModule,
context: PrefetchTrainPipelineContext,
prefetch_stream: Optional[torch.cuda.streams.Stream],
) -> None:
def __call__(self, *input, **kwargs) -> Awaitable:
def _fuse_input_dist_splits(context: TrainPipelineContext) -> None:
def _start_data_dist(
    pipelined_modules: List[ShardedModule],
    batch: In,
    context: TrainPipelineContext,
) -> None:
    """Kick off the input-dist (splits all2all) for every pipelined sharded module.

    Clears the per-batch bookkeeping on ``context``, then for each module:
    extracts its call arguments from ``batch`` using the ArgInfo paths recorded
    when the model was rewritten, creates a fresh module context, and starts
    ``input_dist``. Finally fuses the resulting splits awaitables.
    """
    context.input_dist_splits_requests.clear()
    context.module_contexts_next_batch.clear()
    context.fused_splits_awaitables.clear()
    for module in pipelined_modules:
        forward = module.forward
        assert isinstance(forward, PipelinedForward) or isinstance(
            forward, PrefetchPipelinedForward
        )
        # Retrieve argument for the input_dist of EBC
        # is_getitem True means this argument could be retrieved by a list
        # False means this argument is getting while getattr
        # and this info was done in the _rewrite_model by tracing the
        # entire model to get the arg_info_list
        args = []
        kwargs = {}
        for arg_info in forward.args:
            if arg_info.input_attrs:
                arg = batch
                # Walk the recorded attribute/index path from the batch root
                # down to the actual argument value.
                for attr, is_getitem in zip(arg_info.input_attrs, arg_info.is_getitems):
                    if is_getitem:
                        arg = arg[attr]
                    else:
                        arg = getattr(arg, attr)
                if arg_info.name:
                    kwargs[arg_info.name] = arg
                else:
                    args.append(arg)
            else:
                # Unresolvable argument: pass a None placeholder to keep
                # positional alignment.
                args.append(None)
        # Start input distribution.
        module_ctx = module.create_context()
        context.module_contexts_next_batch[forward.name] = module_ctx
        context.input_dist_splits_requests[forward.name] = module.input_dist(
            module_ctx, *args, **kwargs
        )
    _fuse_input_dist_splits(context)
9,042 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
logger: logging.Logger = logging.getLogger(__name__)
import torch
In = TypeVar("In", bound=Pipelineable)
class TrainPipelineContext:
    """
    Context information for a `TrainPipelineSparseDist` instance.
    Attributes:
        input_dist_splits_requests (Dict[str, Awaitable[Any]]): Stores input dist
            requests in the splits awaitable stage, which occurs after starting the
            input dist.
        input_dist_tensors_requests (Dict[str, Awaitable[Any]]): Stores input dist
            requests in the tensors awaitable stage, which occurs after calling `wait()`
            on the splits awaitable.
        module_contexts (Dict[str, Multistreamable]): Stores module contexts from the
            input dist for the current batch.
        module_contexts_next_batch (Dict[str, Multistreamable]): Stores module contexts
            from the input dist for the next batch.
        fused_splits_awaitables (List[Tuple[List[str], FusedKJTListSplitsAwaitable]]):
            List of fused splits input dist awaitable and the corresponding module names
            of each awaitable.
    """
    # NOTE(review): the field(...) defaults imply an @dataclass decorator at the
    # definition site -- the decorator is not visible in this chunk; confirm.
    # pyre-ignore [4]
    input_dist_splits_requests: Dict[str, Awaitable[Any]] = field(default_factory=dict)
    # pyre-ignore [4]
    input_dist_tensors_requests: Dict[str, Awaitable[Any]] = field(default_factory=dict)
    module_contexts: Dict[str, Multistreamable] = field(default_factory=dict)
    module_contexts_next_batch: Dict[str, Multistreamable] = field(default_factory=dict)
    fused_splits_awaitables: List[Tuple[List[str], FusedKJTListSplitsAwaitable]] = (
        field(default_factory=list)
    )
class BaseForward:
    """Shared state for the pipelined replacement of a ShardedModule's forward."""
    def __init__(
        self,
        name: str,
        args: List[ArgInfo],
        module: ShardedModule,
        context: TrainPipelineContext,
        stream: Optional[torch.cuda.streams.Stream],
    ) -> None:
        self._name = name
        self._args = args
        self._module = module
        self._context = context
        self._stream = stream
    # NOTE(review): the two accessors below read like @property getters -- the
    # decorators are not visible in this chunk; confirm at the definition site.
    def name(self) -> str:
        return self._name
    def args(self) -> List[ArgInfo]:
        return self._args
class PipelinedForward(BaseForward):
    """Forward replacement that consumes the input_dist result started for this batch."""
    # pyre-ignore [2, 24]
    def __call__(self, *input, **kwargs) -> Awaitable:
        # input_dist for this module must already have been started
        # (see _start_data_dist) and advanced to the tensors-awaitable stage.
        assert self._name in self._context.input_dist_tensors_requests
        request = self._context.input_dist_tensors_requests[self._name]
        assert isinstance(request, Awaitable)
        with record_function("## wait_sparse_data_dist ##"):
            # Finish waiting on the dist_stream,
            # in case some delayed stream scheduling happens during the wait() call.
            with torch.cuda.stream(self._stream):
                data = request.wait()
        # Make sure that both result of input_dist and context
        # are properly transferred to the current stream.
        if self._stream is not None:
            torch.cuda.current_stream().wait_stream(self._stream)
            cur_stream = torch.cuda.current_stream()
            assert isinstance(
                data, (torch.Tensor, Multistreamable)
            ), f"{type(data)} must implement Multistreamable interface"
            data.record_stream(cur_stream)
            ctx = self._context.module_contexts[self._name]
            ctx.record_stream(cur_stream)
        return self._module.compute_and_output_dist(
            self._context.module_contexts[self._name], data
        )
class Tracer(torch.fx.Tracer):
    """
    Disables proxying buffers during tracing. Ideally, proxying buffers would be
    disabled, but some models are currently mutating buffer values, which causes errors
    during tracing. If those models can be rewritten to not do that, we can likely
    remove this line.
    """
    proxy_buffer_attributes = False
    def __init__(self, leaf_modules: Optional[List[str]] = None) -> None:
        super().__init__()
        # Extra module qualified names to treat as opaque leaves, in addition
        # to sharded modules and FSDP wrappers.
        self._leaf_modules: List[str] = leaf_modules if leaf_modules is not None else []
    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
        # Sharded modules, explicitly registered names, and FSDP wrappers are
        # leaves: FX must not trace into them.
        if (
            isinstance(m, ShardedModule)
            or module_qualified_name in self._leaf_modules
            or isinstance(m, FSDP)
        ):
            return True
        return super().is_leaf_module(m, module_qualified_name)
def _get_node_args(
    node: Node,
) -> Tuple[List[ArgInfo], int]:
    """Collect ArgInfo for an FX node's positional and keyword inputs.

    Returns the combined ArgInfo list (positional first, then keyword entries
    tagged with their kwarg name) plus the count of inputs the helper managed
    to resolve.
    """
    num_found = 0
    pos_arg_info_list, num_found = _get_node_args_helper(node.args, num_found)
    kwargs_arg_info_list, num_found = _get_node_args_helper(
        node.kwargs.values(), num_found
    )
    # Replace with proper names for kwargs
    # (each zip element is the single ArgInfo for one kwarg, despite the
    # "_list" suffix of the loop variable).
    for name, arg_info_list in zip(node.kwargs, kwargs_arg_info_list):
        arg_info_list.name = name
    arg_info_list = pos_arg_info_list + kwargs_arg_info_list
    return arg_info_list, num_found
def _get_leaf_module_names(model: torch.nn.Module) -> List[str]:
    """
    Returns a list of top level modules to be used as leaf modules for FX tracing.
    This is a shallow FX trace that only goes the minimum depth required to pipeline
    the model unless child modules are explicitly tagged as `_is_pytorch_fx_traceable`.
    """
    collected: Set[str] = set()
    # The helper fills `collected` in place while walking from the root ("").
    _get_leaf_module_names_helper(model, "", collected)
    return list(collected)
def _jit_modules(module: torch.nn.Module, path: str, optional: bool = True) -> bool:
    """Recursively torch.jit.script the non-sharded children of ``module``.

    Returns True when this subtree contains a ShardedModule (directly or
    nested); in that case the *sibling* children at this level are scripted in
    place. When ``optional`` is False, a scripting failure re-raises instead of
    being logged.
    """
    sharded_children = set()
    for name, child in module.named_children():
        curr_path = path + name
        if isinstance(child, ShardedModule):
            sharded_children.add(name)
        else:
            # Recurse first: a child whose subtree holds sharded modules must
            # not itself be scripted.
            child_sharded = _jit_modules(child, curr_path + ".", optional)
            if child_sharded:
                sharded_children.add(name)
    if len(sharded_children) > 0:
        for name, child in module.named_children():
            if name not in sharded_children:
                try:
                    jit_child = torch.jit.script(child)
                    setattr(module, name, jit_child)
                    logger.info(f"jit.script applied to {path + name}.")
                except Exception as error:
                    if not optional:
                        raise
                    else:
                        logger.info(
                            f"Warning: failed to jit.script {path + name}: {error}."
                        )
    return len(sharded_children) > 0
class DistributedModelParallel(nn.Module, FusedOptimizerModule):
"""
Entry point to model parallelism.
Args:
module (nn.Module): module to wrap.
env (Optional[ShardingEnv]): sharding environment that has the process group.
device (Optional[torch.device]): compute device, defaults to cpu.
plan (Optional[ShardingPlan]): plan to use when sharding, defaults to
`EmbeddingShardingPlanner.collective_plan()`.
sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available
to shard with, defaults to `EmbeddingBagCollectionSharder()`.
init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay
parameter initialization until the first forward pass. Pass `True` to delay
initialization of data parallel modules. Do first forward pass and then call
DistributedModelParallel.init_data_parallel().
init_parameters (bool): initialize parameters for modules still on meta device.
data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data
parallel modules.
Example::
def init_weights(m):
if isinstance(m, nn.Linear):
m.weight.fill_(1.0)
elif isinstance(m, EmbeddingBagCollection):
for param in m.parameters():
init.kaiming_normal_(param)
m = MyModel(device='meta')
m = DistributedModelParallel(m)
m.apply(init_weights)
"""
    def __init__(
        self,
        module: nn.Module,
        env: Optional[ShardingEnv] = None,
        device: Optional[torch.device] = None,
        plan: Optional[ShardingPlan] = None,
        sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
        init_data_parallel: bool = True,
        init_parameters: bool = True,
        data_parallel_wrapper: Optional[DataParallelWrapper] = None,
    ) -> None:
        """Shard ``module`` per ``plan`` (planning one if absent) and set up optimizers.

        Defaults: env from the WORLD process group, device cpu, sharders from
        get_default_sharders(), plan from EmbeddingShardingPlanner. See the
        class docstring for argument semantics.
        """
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self.init_parameters = init_parameters
        self._ddp_wrapped: bool = False
        if env is None:
            # Default to the global (WORLD) process group.
            pg = dist.GroupMember.WORLD
            assert pg is not None, "Process group is not initialized"
            env = ShardingEnv.from_process_group(pg)
        self._env: ShardingEnv = env
        if device is None:
            device = torch.device("cpu")
        self.device: torch.device = device
        if sharders is None:
            sharders = get_default_sharders()
        self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = {
            sharder.module_type: sharder for sharder in sharders
        }
        if data_parallel_wrapper is None:
            data_parallel_wrapper = DefaultDataParallelWrapper()
        self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper
        if plan is None:
            # No plan provided: plan collectively when a process group exists,
            # otherwise plan locally.
            planner = EmbeddingShardingPlanner(
                topology=Topology(
                    local_world_size=get_local_size(self._env.world_size),
                    world_size=self._env.world_size,
                    compute_device=self.device.type,
                )
            )
            pg = self._env.process_group
            if pg is not None:
                plan = planner.collective_plan(module, sharders, pg)
            else:
                plan = planner.plan(module, sharders)
        self._plan: ShardingPlan = plan
        self._dmp_wrapped_module: nn.Module = self._init_dmp(module)
        self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module)
        if init_parameters:
            self._init_parameters(self.module)
        if init_data_parallel:
            self.init_data_parallel()
    # NOTE(review): reads like a @property getter -- the decorator is not
    # visible in this chunk; confirm at the definition site.
    def module(self) -> nn.Module:
        """
        Property to directly access sharded module, which will not be wrapped in DDP,
        FSDP, DMP, or any other parallelism wrappers.
        """
        return get_unwrapped_module(self)
    # NOTE(review): reads like the @module.setter counterpart of the property
    # getter -- the decorator is not visible in this chunk; confirm.
    def module(self, value: nn.Module) -> None:
        # Rebinding the wrapped module is only legal before DDP/FSDP wrapping.
        if isinstance(self.module, DistributedDataParallel) or isinstance(
            self.module, FullyShardedDataParallel
        ):
            raise RuntimeError(
                "module can't be set after calling init_data_parallel(...)"
            )
        else:
            self._dmp_wrapped_module = value
    # pyre-ignore [2, 3]
    def forward(self, *args, **kwargs) -> Any:
        """Delegate the forward pass to the wrapped (possibly DDP/FSDP-wrapped) module."""
        return self._dmp_wrapped_module(*args, **kwargs)
    def init_data_parallel(self) -> None:
        """
        See init_data_parallel c-tor argument for usage.
        It's safe to call this method multiple times.
        """
        # Idempotent: the _ddp_wrapped flag guards against double wrapping.
        if not self._ddp_wrapped:
            # Allocate any 'meta' tensors
            if self.init_parameters:
                self._init_parameters(self._dmp_wrapped_module)
            self._data_parallel_wrapper.wrap(self, self._env, self.device)
            self._ddp_wrapped = True
    def copy(
        self,
        device: torch.device,
    ) -> "DistributedModelParallel":
        """
        Recursively copy submodules to a new device by calling a per-module
        customized copy process, since some modules need to keep the original
        references (like `ShardedModule` for inference).
        """
        assert isinstance(device, torch.device)
        # dmp code deep copy
        # (sharded_model_copy(device=None) deep-copies the DMP shell without
        # moving tensors.)
        with sharded_model_copy(device=None):
            copy_dmp = copy.deepcopy(self)
        # tensor resident module deep copy
        copy_dmp_wrapped_module = copy_to_device(
            self._dmp_wrapped_module, self.device, device
        )
        copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module
        return copy_dmp
    def _init_dmp(self, module: nn.Module) -> nn.Module:
        # Shard every module covered by the sharding plan, recursively.
        return self._shard_modules_impl(module)
    def _init_optim(self, module: nn.Module) -> CombinedOptimizer:
        """Collect all per-module fused optimizers into one CombinedOptimizer."""
        # pyre-ignore [6]
        return CombinedOptimizer(self._fused_optim_impl(module, []))
    def _fused_optim_impl(
        self,
        module: nn.Module,
        fused_optims: List[Tuple[str, KeyedOptimizer]],
        path: str = "",
    ) -> List[Tuple[str, KeyedOptimizer]]:
        """Depth-first collect ``(fqn, fused_optimizer)`` pairs into ``fused_optims``.

        Recursion stops at the first FusedOptimizerModule on each branch; its
        children are not searched further.
        """
        if isinstance(module, FusedOptimizerModule):
            fused_optims.append((path, module.fused_optimizer))
            return fused_optims
        for name, child in module.named_children():
            self._fused_optim_impl(
                child,
                fused_optims,
                path + "." + name if path else name,
            )
        return fused_optims
    def _shard_modules_impl(
        self,
        module: nn.Module,
        path: str = "",
    ) -> nn.Module:
        """Depth-first replace plan-covered modules with their sharded variants."""
        # pre-sharded module
        if isinstance(module, ShardedModule):
            return module
        # shardable module
        module_sharding_plan = self._plan.get_plan_for_module(path)
        if module_sharding_plan:
            # The sharder registered for this module type performs the shard.
            sharder_key = type(module)
            module = self._sharder_map[sharder_key].shard(
                module,
                module_sharding_plan,
                self._env,
                self.device,
            )
            return module
        for name, child in module.named_children():
            child = self._shard_modules_impl(
                child,
                path + "." + name if path else name,
            )
            # Re-attach: shard() may have returned a new module object.
            setattr(module, name, child)
        return module
    def _init_parameters(self, module: nn.Module) -> None:
        """Materialize and initialize parameters/buffers still on the 'meta' device."""

        def init_parameters(module: nn.Module) -> None:
            # Allocate parameters and buffers if over 'meta' device.
            has_meta_param = False
            for name, param in module._parameters.items():
                if isinstance(param, torch.Tensor) and param.device.type == "meta":
                    module._parameters[name] = nn.Parameter(
                        torch.empty_like(param, device=self.device),
                        requires_grad=param.requires_grad,
                    )
                    has_meta_param = True
            for name, buffer in module._buffers.items():
                if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta":
                    module._buffers[name] = torch.zeros_like(buffer, device=self.device)
            # Init parameters if at least one parameter is over 'meta' device.
            if has_meta_param and hasattr(module, "reset_parameters"):
                module.reset_parameters()

        module.apply(init_parameters)
    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """Return FQNs of parameters that produce sparse gradients (see helper)."""
        destination = [] if destination is None else destination
        return self._sparse_grad_parameter_names(self.module, destination, prefix)
def _sparse_grad_parameter_names(
self, module: nn.Module, destination: List[str], prefix: str = ""
) -> List[str]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
pass
elif isinstance(module, nn.Embedding):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
elif isinstance(module, nn.EmbeddingBag):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
else:
for name, child in module.named_children():
self._sparse_grad_parameter_names(
child, destination, append_prefix(prefix, name)
)
return destination
    # pyre-ignore [14]
    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Return the wrapped module's state dict with DDP's 'module.' prefix removed."""
        state_dict = get_module(self).state_dict(
            destination=destination, prefix=prefix, keep_vars=keep_vars
        )
        # Strip the DDP wrapper prefix, then restore the caller's prefix so keys
        # match an unwrapped model.
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            state_dict, prefix + _DDP_STATE_DICT_PREFIX
        )
        add_prefix_to_state_dict(state_dict, prefix)
        return state_dict
    # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`
    # inconsistently.
    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Load ``state_dict`` into the wrapped module, handling DDP/sharded children."""
        return self._load_state_dict(self, state_dict, prefix, strict)
    def _load_state_dict(
        self,
        module: nn.Module,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Recursive load: re-prefix for DDP, delegate to ShardedModules, else load locally."""
        missing_keys = []
        unexpected_keys = []
        module = get_module(module)
        if isinstance(module, DistributedDataParallel):
            # DDP expects keys under its 'module.' prefix; rewrite in place.
            torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
                state_dict, prefix
            )
            add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
        if isinstance(module, ShardedModule):
            # Sharded modules own their (sharded) tensors and load them wholesale.
            return module.load_state_dict(state_dict, strict=strict)
        else:
            # Load only this module's own tensors, then recurse per child with a
            # filtered, re-rooted sub-dict.
            module._load_from_state_dict(
                state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
            )
            for name, child in module.named_children():
                m_keys, u_keys = self._load_state_dict(
                    child,
                    filter_state_dict(state_dict, prefix + name),
                    "",
                    strict,
                )
                missing_keys.extend(m_keys)
                unexpected_keys.extend(u_keys)
        return _IncompatibleKeys(
            missing_keys=missing_keys, unexpected_keys=unexpected_keys
        )
    def _named_parameters(
        self,
        module: nn.Module,
        prefix: str = "",
        recurse: bool = True,
        strip_ddp: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield (fqn, parameter) pairs, letting ShardedModules report their own names."""
        if strip_ddp:
            module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            # Sharded modules know how to enumerate their sharded parameters.
            yield from module.named_parameters(prefix, recurse)
        else:
            # Own parameters only; children handled by the explicit recursion below.
            yield from module.named_parameters(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_parameters(
                    child,
                    append_prefix(prefix, name),
                    recurse,
                    strip_ddp,
                )
    def named_parameters(
        self,
        prefix: str = "",
        recurse: bool = True,
        remove_duplicate: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield (fqn, parameter) pairs from the unwrapped module.

        With ``remove_duplicate=True`` a shared parameter object is yielded once
        (first name wins); with False the memo is never populated, so shared
        parameters are yielded under every name.
        """
        gen = self._named_parameters(
            self.module,
            prefix,
            recurse,
        )
        memo = set()
        for key, param in gen:
            if param in memo:
                continue
            if remove_duplicate:
                memo.add(param)
            yield key, param
def bare_named_parameters(
self,
prefix: str = "",
recurse: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
gen = self._named_parameters(
self.module,
prefix,
recurse,
)
memo = set()
for key, param in gen:
if param in memo:
continue
memo.add(param)
yield key, param
    # NOTE(review): takes no `self` and recurses via the class attribute —
    # presumably a @staticmethod whose decorator was stripped in this dump.
    def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
        """Yield FQNs of parameters owned by ShardedModules anywhere under ``module``."""
        module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.sharded_parameter_names(prefix)
        else:
            for name, child in module.named_children():
                yield from DistributedModelParallel._sharded_parameter_names(
                    child, append_prefix(prefix, name)
                )
    def _named_buffers(
        self, module: nn.Module, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield (fqn, buffer) pairs, letting ShardedModules report their own names."""
        module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.named_buffers(prefix, recurse)
        else:
            # Own buffers only; children handled by the explicit recursion below.
            yield from module.named_buffers(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_buffers(
                    child, append_prefix(prefix, name), recurse
                )
def named_buffers(
self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
gen = self._named_buffers(self.module, prefix, recurse)
memo = set()
for key, param in gen:
if param in memo:
continue
if remove_duplicate:
memo.add(param)
yield key, param
    # NOTE(review): presumably @property in the original source.
    def fused_optimizer(self) -> KeyedOptimizer:
        """Combined optimizer over all fused per-module optimizers."""
        return self._optim
    # NOTE(review): presumably @property in the original source.
    def plan(self) -> ShardingPlan:
        """The sharding plan used (provided or computed) for this model."""
        return self._plan
def _reset_parameters(module: nn.Module) -> None:
for _, m in module.named_modules():
if hasattr(m, "reset_parameters"):
m.reset_parameters()
def _rewrite_model(  # noqa C901
    model: torch.nn.Module,
    context: TrainPipelineContext,
    dist_stream: Optional[torch.cuda.streams.Stream],
    batch: Optional[In] = None,
    apply_jit: bool = False,
    pipelined_forward: Type[BaseForward] = PipelinedForward,
) -> Tuple[List[ShardedModule], torch.nn.Module]:
    """FX-trace ``model`` and swap eligible sharded modules' forwards for pipelined ones.

    A sharded module is pipelined only when every one of its call-site arguments
    could be resolved by ``_get_node_args`` (i.e. it is "top-level" in the call
    graph). Returns the list of pipelined sharded modules and the (possibly
    jit-scripted) input model.

    Fix vs. previous revision: the log message contained a stray apostrophe
    (``'{node.target}''``).
    """
    input_model = model
    # Get underlying nn.Module
    if isinstance(model, DistributedModelParallel):
        model = model.module

    # Collect a list of sharded modules.
    sharded_modules = {}
    for name, m in model.named_modules():
        if isinstance(m, ShardedModule):
            sharded_modules[name] = m

    # Trace a model.
    concrete_args = {}
    if batch:
        if hasattr(batch, "to_proxy"):
            # for some special models, it requires using "input"
            # as the key for input
            # pyre-ignore[16]: Variable[In (bound to Pipelineable)] has no attribute to_proxy.
            concrete_args["inputs"] = copy.copy(batch).to_proxy()
        elif hasattr(batch, "to_proxy_tuple"):
            # when the model is pre-fx traced or dynamo exported, the
            # inputs are already flattened, and therefore we use
            # tuple as concrete args that fx.trace will automatically
            # match with the argument names.
            # We pass in the model for the caller side to customize
            # the batch
            # pyre-ignore[16]: Variable[In (bound to Pipelineable)] has no attribute to_proxy_tuple.
            concrete_args = batch.to_proxy_tuple(model)

    tracer = Tracer(leaf_modules=_get_leaf_module_names(model))
    graph = tracer.trace(model, concrete_args=concrete_args)

    # Select sharded modules, which are top-level in the forward call graph,
    # i.e. don't have input transformations, i.e. rely only on 'builtins.getattr'.
    pipelined_forwards = []
    for node in graph.nodes:
        if node.op == "call_module" and node.target in sharded_modules:
            total_num_args = len(node.args) + len(node.kwargs)
            if total_num_args == 0:
                continue
            arg_info_list, num_found = _get_node_args(node)
            if num_found == total_num_args:
                logger.info(f"Module '{node.target}' will be pipelined")
                child = sharded_modules[node.target]
                child.forward = pipelined_forward(
                    node.target,
                    arg_info_list,
                    child,
                    context,
                    dist_stream,
                )
                pipelined_forwards.append(child)

    # JIT script unsharded modules if applicable.
    if apply_jit:
        graph_model = torch.fx.GraphModule(model, graph)
        _jit_modules(graph_model, "")
        if isinstance(input_model, DistributedModelParallel):
            input_model.module = graph_model

    return pipelined_forwards, input_model
9,043 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
import torch
class KJTAllToAllForward:
    """Callable replacement for a KJT all-to-all forward.

    Computes splits metadata eagerly and returns a ``KJTSplitsAllToAllMeta`` —
    presumably consumed later by the fused-splits awaitable machinery so the
    actual tensor exchange can be deferred/batched; confirm against callers.
    """

    def __init__(
        self, pg: dist.ProcessGroup, splits: List[int], stagger: int = 1
    ) -> None:
        self._pg = pg
        self._splits = splits
        self._stagger = stagger
        # Prefix sums of `splits` delimit each rank's key range.
        self._splits_cumsum: List[int] = [0] + list(itertools.accumulate(splits))

    def __call__(self, input: KeyedJaggedTensor) -> KJTSplitsAllToAllMeta:
        with torch.no_grad():
            assert len(input.keys()) == sum(self._splits)
            rank = dist.get_rank(self._pg)
            # Keys this rank keeps after the exchange.
            local_keys = input.keys()[
                self._splits_cumsum[rank] : self._splits_cumsum[rank + 1]
            ]
            input_splits = input.dist_splits(self._splits)
            device = input.values().device
            splits_tensors = [
                torch.tensor(splits, device=device) for splits in input_splits
            ]
            if not input.variable_stride_per_key():
                # Fixed-stride KJTs additionally exchange the batch stride per rank.
                splits_tensors.append(
                    torch.tensor([input.stride()] * self._pg.size(), device=device)
                )
            return KJTSplitsAllToAllMeta(
                pg=self._pg,
                _input=input,
                splits=self._splits,
                splits_tensors=splits_tensors,
                input_splits=input_splits,
                input_tensors=input.dist_tensors(),
                labels=input.dist_labels(),
                keys=local_keys,
                device=device,
                stagger=self._stagger,
            )
class KJTAllToAll(nn.Module):
    """
    Redistributes `KeyedJaggedTensor` to a `ProcessGroup` according to splits.

    Implementation utilizes AlltoAll collective as part of torch.distributed.

    The input provides the necessary tensors and input splits to distribute.
    The first collective call in `KJTAllToAllSplitsAwaitable` will transmit output
    splits (to allocate correct space for tensors) and batch size per rank. The
    following collective calls in `KJTAllToAllTensorsAwaitable` will transmit the actual
    tensors asynchronously.

    Args:
        pg (dist.ProcessGroup): ProcessGroup for AlltoAll communication.
        splits (List[int]): List of len(pg.size()) which indicates how many features to
            send to each pg.rank(). It is assumed the `KeyedJaggedTensor` is ordered by
            destination rank. Same for all ranks.
        stagger (int): stagger value to apply to recat tensor, see `_get_recat` function
            for more detail.

    Example::

        keys=['A','B','C']
        splits=[2,1]
        kjtA2A = KJTAllToAll(pg, splits)
        awaitable = kjtA2A(rank0_input)

        # where:
        # rank0_input is KeyedJaggedTensor holding

        #         0           1           2
        # 'A'    [A.V0]       None        [A.V1, A.V2]
        # 'B'    None         [B.V0]      [B.V1]
        # 'C'    [C.V0]       [C.V1]      None

        # rank1_input is KeyedJaggedTensor holding

        #         0           1           2
        # 'A'     [A.V3]      [A.V4]      None
        # 'B'     None        [B.V2]      [B.V3, B.V4]
        # 'C'     [C.V2]      [C.V3]      None

        rank0_output = awaitable.wait()

        # where:
        # rank0_output is KeyedJaggedTensor holding

        #         0           1           2           3           4           5
        # 'A'     [A.V0]      None        [A.V1, A.V2]  [A.V3]    [A.V4]      None
        # 'B'     None        [B.V0]      [B.V1]        None      [B.V2]      [B.V3, B.V4]

        # rank1_output is KeyedJaggedTensor holding
        #         0           1           2           3           4           5
        # 'C'     [C.V0]      [C.V1]      None        [C.V2]      [C.V3]      None
    """

    def __init__(
        self,
        pg: dist.ProcessGroup,
        splits: List[int],
        stagger: int = 1,
    ) -> None:
        super().__init__()
        # One split per rank: this module assumes the KJT's keys are ordered by
        # destination rank.
        assert len(splits) == pg.size()
        self._pg: dist.ProcessGroup = pg
        self._splits = splits
        # Prefix sums of `splits` delimit each rank's key range.
        self._splits_cumsum: List[int] = [0] + list(itertools.accumulate(splits))
        self._stagger = stagger

    def forward(
        self, input: KeyedJaggedTensor
    ) -> Awaitable[KJTAllToAllTensorsAwaitable]:
        """
        Sends input to relevant `ProcessGroup` ranks.

        The first wait will get the output splits for the provided tensors and issue
        tensors AlltoAll. The second wait will get the tensors.

        Args:
            input (KeyedJaggedTensor): `KeyedJaggedTensor` of values to distribute.

        Returns:
            Awaitable[KJTAllToAllTensorsAwaitable]: awaitable of a `KJTAllToAllTensorsAwaitable`.
        """
        with torch.no_grad():
            assert len(input.keys()) == sum(self._splits)
            rank = dist.get_rank(self._pg)
            # Keys this rank keeps after the exchange.
            local_keys = input.keys()[
                self._splits_cumsum[rank] : self._splits_cumsum[rank + 1]
            ]
            return KJTAllToAllSplitsAwaitable(
                pg=self._pg,
                input=input,
                splits=self._splits,
                labels=input.dist_labels(),
                tensor_splits=input.dist_splits(self._splits),
                input_tensors=input.dist_tensors(),
                keys=local_keys,
                device=input.device(),
                stagger=self._stagger,
            )
The provided code snippet includes necessary dependencies for implementing the `_override_input_dist_forwards` function. Write a Python function `def _override_input_dist_forwards(pipelined_modules: List[ShardedModule]) -> None` to solve the following problem:
Overrides each input dist forward to support fusing the splits collective. NOTE: this can only be called after the input dists are initialized.
Here is the function:
def _override_input_dist_forwards(pipelined_modules: List[ShardedModule]) -> None:
    """
    Overrides each input dist forward to support fusing the splits collective.

    NOTE: this can only be called after the input dists are initialized.
    """
    for pipelined_module in pipelined_modules:
        for child_fqn, child in pipelined_module.named_modules():
            if hasattr(child, "_has_uninitialized_input_dist"):
                assert (
                    not child._has_uninitialized_input_dist
                ), f"{child_fqn} has uninitialized input dist"
            if not hasattr(child, "_input_dists"):
                continue
            for input_dist in child._input_dists:
                # Only KJTAllToAll-backed input dists get the fused forward.
                if not hasattr(input_dist, "_dist"):
                    continue
                assert isinstance(input_dist._dist, KJTAllToAll)
                input_dist._dist.forward = KJTAllToAllForward(
                    pg=input_dist._dist._pg,
                    splits=input_dist._dist._splits,
                    stagger=input_dist._dist._stagger,
                )
9,044 | import copy
import itertools
import logging
from collections import defaultdict
from dataclasses import dataclass, field
from threading import Event, Thread
from typing import (
Any,
Callable,
cast,
Dict,
Generic,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.fx.node import Node
from torch.profiler import record_function
from torchrec.distributed.dist_data import KJTAllToAll
from torchrec.distributed.embedding_sharding import (
FusedKJTListSplitsAwaitable,
KJTListSplitsAwaitable,
KJTSplitsAllToAllMeta,
)
from torchrec.distributed.model_parallel import DistributedModelParallel, ShardedModule
from torchrec.distributed.types import Awaitable
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable, Pipelineable
import torch
In = TypeVar("In", bound=Pipelineable)


# NOTE(review): the lines below are a garbled extraction fragment — a class
# header, a bodiless `to` signature, and an unrelated helper (`get_h2d_func`,
# originally defined elsewhere) were fused together. Tokens kept verbatim;
# compare against the original torchrec source before relying on this.
class Pipelineable(Multistreamable):
    def to(self, device: torch.device, non_blocking: bool) -> "Pipelineable":
        def get_h2d_func(batch: In, device: torch.device) -> Pipelineable:
            return batch.to(device, non_blocking=True)
9,045 | import abc
import logging
from typing import cast, Generic, Iterator, List, Optional, Tuple
import torch
from torch.autograd.profiler import record_function
from torchrec.distributed.model_parallel import ShardedModule
from torchrec.distributed.train_pipeline.utils import (
_override_input_dist_forwards,
_rewrite_model,
_start_data_dist,
_to_device,
_wait_for_batch,
DataLoadingThread,
In,
Out,
PipelineStage,
PrefetchPipelinedForward,
PrefetchTrainPipelineContext,
RunnableType,
StageOut,
StageOutputWithEvent,
TrainPipelineContext,
)
from torchrec.distributed.types import Awaitable
from torchrec.streamable import Multistreamable
def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
    """Return True when this code is currently being traced by TorchDynamo.

    The previous stub always returned False, which silently disabled any
    dynamo-specific branches even inside a compiled region. Delegate to
    torch's own flag when available, keeping False as the safe fallback for
    torch versions that do not expose it.
    """
    try:
        return bool(torch.compiler.is_compiling())
    except Exception:  # torch too old to expose torch.compiler.is_compiling
        return False
9,046 | from functools import wraps
from typing import Any, Callable, cast, Optional, TypeVar
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `is_leader` function. Write a Python function `def is_leader(pg: Optional[dist.ProcessGroup], leader_rank: int = 0) -> bool` to solve the following problem:
Checks if the current process is the leader. Args: pg (Optional[dist.ProcessGroup]): the process's rank within the pg is used to determine if the process is the leader. pg being None implies that the process is the only member in the group (e.g. a single process program). leader_rank (int): the definition of leader (defaults to 0). The caller can override it with a context-specific definition.
Here is the function:
def is_leader(pg: Optional[dist.ProcessGroup], leader_rank: int = 0) -> bool:
    """Return True iff the current process is the leader of ``pg``.

    A ``None`` process group means a single-process program, whose sole member
    (rank 0) is the leader. ``leader_rank`` lets a caller supply a
    context-specific definition of which rank counts as the leader.
    """
    current_rank = 0 if pg is None else pg.rank()
    return current_rank == leader_rank
9,047 | from functools import wraps
from typing import Any, Callable, cast, Optional, TypeVar
import torch.distributed as dist
T = TypeVar("T")


def invoke_on_rank_and_broadcast_result(
    pg: dist.ProcessGroup,
    rank: int,
    func: Callable[..., T],
    *args: Any,
    **kwargs: Any,
) -> T:
    """Run ``func`` on the designated ``rank`` only and broadcast its result to
    every member of ``pg``.

    Example::

        id = invoke_on_rank_and_broadcast_result(pg, 0, allocate_id)
    """
    # Only the designated rank computes; every other rank contributes a
    # placeholder that broadcast_object_list overwrites in place.
    payload = [func(*args, **kwargs) if pg.rank() == rank else None]
    if pg.size() > 1:
        dist.broadcast_object_list(payload, rank, group=pg)
    return cast(T, payload[0])
def run_on_leader(pg: dist.ProcessGroup, rank: int):
    """Decorator factory: the wrapped function runs on ``rank`` and its result
    is broadcast to all ranks in ``pg``."""

    def callable(func: Callable[..., T]) -> T:
        # NOTE(review): annotated `-> T` but actually returns a callable; kept
        # as-is to preserve the public typing surface.
        @wraps(func)
        def wrapped(*args: Any, **kwargs: Any) -> T:
            return invoke_on_rank_and_broadcast_result(pg, rank, func, *args, **kwargs)

        return wrapped

    return callable
9,048 | import copy
import itertools
import logging
from decimal import Decimal
from typing import cast, Dict, List, Optional, Set, Tuple, Union
import torch
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.types import (
Enumerator,
Perf,
Proposer,
ShardingOption,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, LuusJaakolaSearch, prod
class Perf:
    """
    Representation of the breakdown of the perf estimate for a single shard of an
    embedding table.
    """

    # NOTE(review): bare annotated fields with a trailing default — presumably a
    # @dataclass whose decorator was stripped by the extraction; confirm.
    fwd_compute: float
    fwd_comms: float
    bwd_compute: float
    bwd_comms: float
    prefetch_compute: float = 0.0

    # NOTE(review): consumed as `perf.total` (no call) elsewhere in this dump,
    # so presumably decorated @property in the original source.
    def total(self) -> float:
        # When using embedding offload, there is a prefetch compute component. This
        # prefetch can overlap with fwd_compute + fwd_comm and dense fwd (some of it
        # overlaps with fwd_compute) and dense bwd. (fwd_compute and bwd_compute are
        # embedding fwd/bwd, nothing to do with dense). Only when prefetch is longer
        # than fwd_compute + dense_fwd + dense_bwd it will block bwd_compute. However,
        # we don't have an effective way to estimate dense fwd/bwd at this point, so our
        # cost model is too simplistic. Instead prefetch is always considered blocking.
        #
        # Also note, measuring prefetch blocking can only be done after partitioning,
        # here are only have the per shard estimates.
        #
        # However adding a per-shard prefetch component to the cost model does have the
        # benefit that 1) it enables the ScaleupProposer to explore the trade off
        # between increasing cache sizes vs more difficult bin-packing constraints. 2)
        # it helps balance the prefetch compute across the ranks.
        return (
            self.fwd_compute
            + self.bwd_compute
            + self.fwd_comms
            + self.bwd_comms
            + self.prefetch_compute
        )

    def __add__(self, other: "Perf") -> "Perf":
        # Component-wise sum; used to aggregate per-shard estimates.
        return Perf(
            fwd_compute=self.fwd_compute + other.fwd_compute,
            fwd_comms=self.fwd_comms + other.fwd_comms,
            bwd_compute=self.bwd_compute + other.bwd_compute,
            bwd_comms=self.bwd_comms + other.bwd_comms,
            prefetch_compute=self.prefetch_compute + other.prefetch_compute,
        )

    def __hash__(self) -> int:
        return hash(
            (
                self.fwd_compute,
                self.fwd_comms,
                self.bwd_compute,
                self.bwd_comms,
                self.prefetch_compute,
            )
        )
class ShardingOption:
    """
    One way of sharding an embedding table.
    """

    # NOTE(review): several no-arg methods below (tensor, module, fqn,
    # cache_load_factor, path, num_shards, num_inputs, total_storage,
    # total_perf, is_pooled) read like @property accessors whose decorators
    # were stripped by the extraction — confirm against the original source.
    def __init__(
        self,
        name: str,
        tensor: torch.Tensor,
        module: Tuple[str, nn.Module],
        input_lengths: List[float],
        batch_size: int,
        sharding_type: str,
        partition_by: str,
        compute_kernel: str,
        shards: List[Shard],
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
        dependency: Optional[str] = None,
        is_pooled: Optional[bool] = None,
        feature_names: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        self._tensor = tensor
        self._module = module
        self.input_lengths = input_lengths
        self.batch_size = batch_size
        self.sharding_type = sharding_type
        self.partition_by = partition_by
        self.compute_kernel = compute_kernel
        # relevant to planner output, must be populated if sharding option
        # part of final solution
        self.shards = shards
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
        self.dependency = dependency
        self._is_pooled = is_pooled
        self.is_weighted: Optional[bool] = None
        self.feature_names: Optional[List[str]] = feature_names

    def tensor(self) -> torch.Tensor:
        return self._tensor

    def module(self) -> Tuple[str, nn.Module]:
        return self._module

    def fqn(self) -> str:
        return self.module[0] + "." + self.name

    def cache_load_factor(self) -> Optional[float]:
        if self.cache_params is not None:
            return self.cache_params.load_factor
        return None

    def path(self) -> str:
        return self.module[0]

    def num_shards(self) -> int:
        return len(self.shards)

    def num_inputs(self) -> int:
        return len(self.input_lengths)

    def total_storage(self) -> Storage:
        # Sum of per-shard storage.
        storage: Storage = Storage(hbm=0, ddr=0)
        for shard in self.shards:
            storage += cast(Storage, shard.storage)
        return storage

    def total_perf(self) -> float:
        # Sum of per-shard perf; `shard.perf.total` with no call — presumably
        # Perf.total is a @property in the original source.
        perf: float = 0
        for shard in self.shards:
            # pyre-ignore: Undefined attribute [16]
            perf += shard.perf.total
        return perf

    def is_pooled(self) -> bool:
        # Lazily computed and cached on first access.
        if self._is_pooled is None:
            self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
        return self._is_pooled

    # NOTE(review): no `self`/`cls` — presumably a stripped @staticmethod.
    def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
        """Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
        if isinstance(module, EmbeddingCollectionInterface) or isinstance(
            module, ManagedCollisionEmbeddingCollection
        ):
            return False
        for submodule in module.modules():
            if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
                submodule, ManagedCollisionEmbeddingCollection
            ):
                for name, _ in submodule.named_parameters():
                    if sharding_option_name in name:
                        return False
        return True

    def __hash__(self) -> int:
        return hash(
            (
                self.fqn,
                self.sharding_type,
                self.compute_kernel,
                tuple(self.shards),
                self.cache_params,
            )
        )

    def __deepcopy__(
        self, memo: Optional[Dict[int, "ShardingOption"]]
    ) -> "ShardingOption":
        # _tensor and _module are intentionally shared by reference (copying a
        # live nn.Module / weight tensor would be wasteful); everything else is
        # deep-copied.
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            if k in ["_tensor", "_module"]:
                setattr(result, k, v)
            else:
                setattr(result, k, deepcopy(v, memo))
        return result
def _sharding_option_score(
    sharding_option: ShardingOption, use_depth: bool = True
) -> float:
    """Score a sharding option by its shards' perf totals.

    ``use_depth=True`` scores by the slowest shard (critical path); otherwise by
    the sum over all shards.
    """
    shard_perfs = [cast(Perf, shard.perf).total for shard in sharding_option.shards]
    return max(shard_perfs) if use_depth else sum(shard_perfs)
9,049 | import copy
import itertools
import logging
from decimal import Decimal
from typing import cast, Dict, List, Optional, Set, Tuple, Union
import torch
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.types import (
Enumerator,
Perf,
Proposer,
ShardingOption,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, LuusJaakolaSearch, prod
# NOTE(review): duplicate of the ShardingOption definition that appears earlier
# in this concatenated dump (same tokens); kept verbatim. The no-arg accessor
# methods are presumably stripped @property decorators — confirm.
class ShardingOption:
    """
    One way of sharding an embedding table.
    """

    def __init__(
        self,
        name: str,
        tensor: torch.Tensor,
        module: Tuple[str, nn.Module],
        input_lengths: List[float],
        batch_size: int,
        sharding_type: str,
        partition_by: str,
        compute_kernel: str,
        shards: List[Shard],
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
        dependency: Optional[str] = None,
        is_pooled: Optional[bool] = None,
        feature_names: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        self._tensor = tensor
        self._module = module
        self.input_lengths = input_lengths
        self.batch_size = batch_size
        self.sharding_type = sharding_type
        self.partition_by = partition_by
        self.compute_kernel = compute_kernel
        # relevant to planner output, must be populated if sharding option
        # part of final solution
        self.shards = shards
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
        self.dependency = dependency
        self._is_pooled = is_pooled
        self.is_weighted: Optional[bool] = None
        self.feature_names: Optional[List[str]] = feature_names

    def tensor(self) -> torch.Tensor:
        return self._tensor

    def module(self) -> Tuple[str, nn.Module]:
        return self._module

    def fqn(self) -> str:
        return self.module[0] + "." + self.name

    def cache_load_factor(self) -> Optional[float]:
        if self.cache_params is not None:
            return self.cache_params.load_factor
        return None

    def path(self) -> str:
        return self.module[0]

    def num_shards(self) -> int:
        return len(self.shards)

    def num_inputs(self) -> int:
        return len(self.input_lengths)

    def total_storage(self) -> Storage:
        storage: Storage = Storage(hbm=0, ddr=0)
        for shard in self.shards:
            storage += cast(Storage, shard.storage)
        return storage

    def total_perf(self) -> float:
        perf: float = 0
        for shard in self.shards:
            # pyre-ignore: Undefined attribute [16]
            perf += shard.perf.total
        return perf

    def is_pooled(self) -> bool:
        if self._is_pooled is None:
            self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
        return self._is_pooled

    def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
        """Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
        if isinstance(module, EmbeddingCollectionInterface) or isinstance(
            module, ManagedCollisionEmbeddingCollection
        ):
            return False
        for submodule in module.modules():
            if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
                submodule, ManagedCollisionEmbeddingCollection
            ):
                for name, _ in submodule.named_parameters():
                    if sharding_option_name in name:
                        return False
        return True

    def __hash__(self) -> int:
        return hash(
            (
                self.fqn,
                self.sharding_type,
                self.compute_kernel,
                tuple(self.shards),
                self.cache_params,
            )
        )

    def __deepcopy__(
        self, memo: Optional[Dict[int, "ShardingOption"]]
    ) -> "ShardingOption":
        # _tensor and _module are shared by reference; the rest is deep-copied.
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            if k in ["_tensor", "_module"]:
                setattr(result, k, v)
            else:
                setattr(result, k, deepcopy(v, memo))
        return result
class Proposer(abc.ABC):
    """
    Proposes complete lists of sharding options which can be partitioned to
    generate a plan.
    """

    # NOTE(review): `...` bodies — presumably @abstractmethod decorators were
    # stripped by the extraction; confirm.
    # Prime the proposer with the full search space (and optionally the
    # enumerator that produced it).
    def load(
        self,
        search_space: List[ShardingOption],
        enumerator: Optional[Enumerator] = None,
    ) -> None: ...

    # Report back whether the last proposal was partitionable, with optional
    # plan/perf/storage details, so the proposer can adapt.
    def feedback(
        self,
        partitionable: bool,
        plan: Optional[List[ShardingOption]] = None,
        perf_rating: Optional[float] = None,
        storage_constraint: Optional[Topology] = None,
    ) -> None: ...

    # Return the next candidate proposal, or None when exhausted.
    def propose(self) -> Optional[List[ShardingOption]]: ...
The provided code snippet includes necessary dependencies for implementing the `proposers_to_proposals_list` function. Write a Python function `def proposers_to_proposals_list( proposers_list: List[Proposer], search_space: List[ShardingOption] ) -> List[List[ShardingOption]]` to solve the following problem:
Only works for static-feedback proposers (the path of proposals to check is independent of the performance of the proposals).
Here is the function:
def proposers_to_proposals_list(
    proposers_list: List[Proposer], search_space: List[ShardingOption]
) -> List[List[ShardingOption]]:
    """Materialize every proposal from the given proposers, de-duplicated.

    Only valid for static-feedback proposers: the sequence of proposals must
    not depend on the feedback given (each proposal is simply acknowledged
    with ``partitionable=True``).
    """
    for proposer in proposers_list:
        proposer.load(search_space=search_space)

    seen: Set[Tuple[int, ...]] = set()
    proposals_list: List[List[ShardingOption]] = []
    for proposer in proposers_list:
        # Truthiness check on purpose: an empty proposal also terminates.
        while proposal := proposer.propose():
            proposer.feedback(partitionable=True)
            # Order-insensitive fingerprint of the proposal's options.
            key = tuple(sorted(hash(option) for option in proposal))
            if key not in seen:
                seen.add(key)
                proposals_list.append(proposal)
    return proposals_list
9,050 | import copy
import logging
import math
from typing import Dict, List, Optional, Set, Tuple
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, POOLING_FACTOR
from torchrec.distributed.planner.types import (
ParameterConstraints,
PlannerError,
PlannerErrorType,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name, storage_repr_in_gb
from torchrec.distributed.types import get_tensor_size_bytes, ModuleSharder
logger: logging.Logger = logging.getLogger(__name__)
def _get_dense_tensor_size(
module: nn.Module,
shardable_modules: Set[nn.Module],
multiplier: float = 6.0,
) -> int:
class Storage:
def __add__(self, other: "Storage") -> "Storage":
def __sub__(self, other: "Storage") -> "Storage":
def __hash__(self) -> int:
def fits_in(self, other: "Storage") -> bool:
class Topology:
def __init__(
self,
world_size: int,
compute_device: str,
hbm_cap: Optional[int] = None,
ddr_cap: Optional[int] = None,
local_world_size: Optional[int] = None,
hbm_mem_bw: float = HBM_MEM_BW,
ddr_mem_bw: float = DDR_MEM_BW,
intra_host_bw: float = INTRA_NODE_BANDWIDTH,
inter_host_bw: float = CROSS_NODE_BANDWIDTH,
bwd_compute_multiplier: float = BWD_COMPUTE_MULTIPLIER,
) -> None:
def compute_device(self) -> str:
def devices(self) -> List[DeviceHardware]:
def world_size(self) -> int:
def local_world_size(self) -> int:
def hbm_mem_bw(self) -> float:
def ddr_mem_bw(self) -> float:
def intra_host_bw(self) -> float:
def inter_host_bw(self) -> float:
def bwd_compute_multiplier(self) -> float:
def __repr__(self) -> str:
def _reserve_dense_storage(
    topology: Topology,
    module: nn.Module,
    shardable_modules: Set[nn.Module],
    multiplier: float,
    dense_tensor_estimate: Optional[int] = None,
) -> Storage:
    """
    Deduct the dense (non-shardable) parameter footprint from every device.

    The size comes from ``_get_dense_tensor_size`` unless the caller supplies
    an explicit ``dense_tensor_estimate`` override.  The reservation lands on
    HBM for CUDA topologies and on DDR for CPU/MTIA topologies; the same
    amount is subtracted from each device's remaining storage.

    Returns:
        The per-device Storage that was reserved.
    """
    estimated = _get_dense_tensor_size(module, shardable_modules, multiplier)
    if dense_tensor_estimate:
        logger.info(
            f"We override default dense tensor estimate ({estimated} bytes) "
            f"with user-provided dense tensor estimate ({dense_tensor_estimate} bytes)."
        )
        estimated = dense_tensor_estimate

    on_hbm = topology.compute_device == "cuda"
    on_ddr = topology.compute_device in {"cpu", "mtia"}
    reserved = Storage(
        hbm=estimated if on_hbm else 0,
        ddr=estimated if on_ddr else 0,
    )
    for device in topology.devices:
        device.storage -= reserved
    return reserved
9,051 | import copy
import logging
import math
from typing import Dict, List, Optional, Set, Tuple
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, POOLING_FACTOR
from torchrec.distributed.planner.types import (
ParameterConstraints,
PlannerError,
PlannerErrorType,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name, storage_repr_in_gb
from torchrec.distributed.types import get_tensor_size_bytes, ModuleSharder
class Storage:
"""
Representation of the storage capacities of a hardware used in training.
"""
hbm: int
ddr: int
def __add__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm + other.hbm,
ddr=self.ddr + other.ddr,
)
def __sub__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm - other.hbm,
ddr=self.ddr - other.ddr,
)
def __hash__(self) -> int:
return hash((self.hbm, self.ddr))
def fits_in(self, other: "Storage") -> bool:
return self.hbm <= other.hbm and self.ddr <= other.ddr
class Topology:
def __init__(
self,
world_size: int,
compute_device: str,
hbm_cap: Optional[int] = None,
ddr_cap: Optional[int] = None,
local_world_size: Optional[int] = None,
hbm_mem_bw: float = HBM_MEM_BW,
ddr_mem_bw: float = DDR_MEM_BW,
intra_host_bw: float = INTRA_NODE_BANDWIDTH,
inter_host_bw: float = CROSS_NODE_BANDWIDTH,
bwd_compute_multiplier: float = BWD_COMPUTE_MULTIPLIER,
) -> None:
"""
Representation of a network of devices in a cluster.
"""
# validate input
assert compute_device in [
"cpu",
"cuda",
"mtia",
], f"unsupported compute device {compute_device}"
self._compute_device = compute_device
self._world_size = world_size
hbm_per_device = 0
if self._compute_device == "cuda":
hbm_per_device = hbm_cap if hbm_cap else HBM_CAP
ddr_cap = ddr_cap if ddr_cap else DDR_CAP
self._devices: List[DeviceHardware] = []
for rank in range(world_size):
self._devices.append(
DeviceHardware(
rank=rank,
storage=Storage(hbm=hbm_per_device, ddr=ddr_cap),
perf=Perf(fwd_compute=0, fwd_comms=0, bwd_compute=0, bwd_comms=0),
)
)
self._local_world_size: int = (
local_world_size if local_world_size else world_size
)
self._hbm_mem_bw = hbm_mem_bw
self._ddr_mem_bw = ddr_mem_bw
self._intra_host_bw = intra_host_bw
self._inter_host_bw = inter_host_bw
self._bwd_compute_multiplier = bwd_compute_multiplier
def compute_device(self) -> str:
return self._compute_device
def devices(self) -> List[DeviceHardware]:
return self._devices
def world_size(self) -> int:
return self._world_size
def local_world_size(self) -> int:
return self._local_world_size
def hbm_mem_bw(self) -> float:
return self._hbm_mem_bw
def ddr_mem_bw(self) -> float:
return self._ddr_mem_bw
def intra_host_bw(self) -> float:
return self._intra_host_bw
def inter_host_bw(self) -> float:
return self._inter_host_bw
def bwd_compute_multiplier(self) -> float:
return self._bwd_compute_multiplier
def __repr__(self) -> str:
topology_repr: str = f"world_size={self._world_size} \n"
topology_repr += f"compute_device={self._compute_device}\n"
topology_repr += "devices=\n"
for idx, device in enumerate(self._devices):
topology_repr += f"\tdevice {idx} {device}\n"
topology_repr += f"local_world_size={self._local_world_size} \n"
topology_repr += f"intra_host_bw={self._intra_host_bw} \n"
topology_repr += f"inter_host_bw={self._inter_host_bw} \n"
return topology_repr
def _reserve_kjt_storage(
    topology: Topology,
    batch_size: int,
    batch_inputs: List[float],
    input_data_type_size: int,
    multiplier: int,
) -> Storage:
    """
    Deduct a KJT (input batch) buffer from every device in the topology.

    The reserved size is ``ceil(sum(batch_inputs) * input_data_type_size) *
    multiplier`` bytes, placed on HBM for CUDA topologies and on DDR for
    CPU/MTIA topologies.

    NOTE(review): ``batch_size`` is accepted but never read here — presumably
    it is already folded into ``batch_inputs`` by the caller; confirm before
    removing it from the signature.

    Returns:
        The per-device Storage that was reserved.
    """
    kjt_bytes = math.ceil(sum(batch_inputs) * float(input_data_type_size)) * multiplier
    uses_hbm = topology.compute_device == "cuda"
    uses_ddr = topology.compute_device in {"cpu", "mtia"}
    kjt_storage = Storage(
        hbm=kjt_bytes if uses_hbm else 0,
        ddr=kjt_bytes if uses_ddr else 0,
    )
    for device in topology.devices:
        device.storage -= kjt_storage
    return kjt_storage
9,052 | import copy
import logging
import math
from typing import Dict, List, Optional, Set, Tuple
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, POOLING_FACTOR
from torchrec.distributed.planner.types import (
ParameterConstraints,
PlannerError,
PlannerErrorType,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name, storage_repr_in_gb
from torchrec.distributed.types import get_tensor_size_bytes, ModuleSharder
class Topology:
def __init__(
self,
world_size: int,
compute_device: str,
hbm_cap: Optional[int] = None,
ddr_cap: Optional[int] = None,
local_world_size: Optional[int] = None,
hbm_mem_bw: float = HBM_MEM_BW,
ddr_mem_bw: float = DDR_MEM_BW,
intra_host_bw: float = INTRA_NODE_BANDWIDTH,
inter_host_bw: float = CROSS_NODE_BANDWIDTH,
bwd_compute_multiplier: float = BWD_COMPUTE_MULTIPLIER,
) -> None:
"""
Representation of a network of devices in a cluster.
"""
# validate input
assert compute_device in [
"cpu",
"cuda",
"mtia",
], f"unsupported compute device {compute_device}"
self._compute_device = compute_device
self._world_size = world_size
hbm_per_device = 0
if self._compute_device == "cuda":
hbm_per_device = hbm_cap if hbm_cap else HBM_CAP
ddr_cap = ddr_cap if ddr_cap else DDR_CAP
self._devices: List[DeviceHardware] = []
for rank in range(world_size):
self._devices.append(
DeviceHardware(
rank=rank,
storage=Storage(hbm=hbm_per_device, ddr=ddr_cap),
perf=Perf(fwd_compute=0, fwd_comms=0, bwd_compute=0, bwd_comms=0),
)
)
self._local_world_size: int = (
local_world_size if local_world_size else world_size
)
self._hbm_mem_bw = hbm_mem_bw
self._ddr_mem_bw = ddr_mem_bw
self._intra_host_bw = intra_host_bw
self._inter_host_bw = inter_host_bw
self._bwd_compute_multiplier = bwd_compute_multiplier
def compute_device(self) -> str:
return self._compute_device
def devices(self) -> List[DeviceHardware]:
return self._devices
def world_size(self) -> int:
return self._world_size
def local_world_size(self) -> int:
return self._local_world_size
def hbm_mem_bw(self) -> float:
return self._hbm_mem_bw
def ddr_mem_bw(self) -> float:
return self._ddr_mem_bw
def intra_host_bw(self) -> float:
return self._intra_host_bw
def inter_host_bw(self) -> float:
return self._inter_host_bw
def bwd_compute_multiplier(self) -> float:
return self._bwd_compute_multiplier
def __repr__(self) -> str:
topology_repr: str = f"world_size={self._world_size} \n"
topology_repr += f"compute_device={self._compute_device}\n"
topology_repr += "devices=\n"
for idx, device in enumerate(self._devices):
topology_repr += f"\tdevice {idx} {device}\n"
topology_repr += f"local_world_size={self._local_world_size} \n"
topology_repr += f"intra_host_bw={self._intra_host_bw} \n"
topology_repr += f"inter_host_bw={self._inter_host_bw} \n"
return topology_repr
def _reserve_storage_percentage(topology: Topology, percent: float) -> None:
for device in topology.devices:
device.storage.hbm = int((1 - percent) * device.storage.hbm) | null |
9,053 | import copy
import logging
import math
from typing import Dict, List, Optional, Set, Tuple
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, POOLING_FACTOR
from torchrec.distributed.planner.types import (
ParameterConstraints,
PlannerError,
PlannerErrorType,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name, storage_repr_in_gb
from torchrec.distributed.types import get_tensor_size_bytes, ModuleSharder
POOLING_FACTOR: float = 1.0
class ParameterConstraints:
"""
Stores user provided constraints around the sharding plan.
If provided, `pooling_factors`, `num_poolings`, and `batch_sizes` must match in
length, as per sample.
"""
sharding_types: Optional[List[str]] = None
compute_kernels: Optional[List[str]] = None
min_partition: Optional[int] = None # CW sharding, min CW dim to shard
pooling_factors: List[float] = field(
default_factory=lambda: [POOLING_FACTOR]
) # average number of embedding lookups required per sample
num_poolings: Optional[List[float]] = None # number of poolings per sample in batch
batch_sizes: Optional[List[int]] = None # batch size per input feature
is_weighted: bool = False
cache_params: Optional[CacheParams] = None
enforce_hbm: Optional[bool] = None
stochastic_rounding: Optional[bool] = None
bounds_check_mode: Optional[BoundsCheckMode] = None
feature_names: Optional[List[str]] = None
def sharder_name(t: Type[Any]) -> str:
return t.__module__ + "." + t.__name__
class ModuleSharder(abc.ABC, Generic[M]):
"""
`ModuleSharder` is per each module, which supports sharding,
e.g. `EmbeddingBagCollection`.
Args::
qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
"""
def __init__(
self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
) -> None:
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self._qcomm_codecs_registry = qcomm_codecs_registry
# pyre-ignore [3]
def shard(
self,
module: M,
params: EmbeddingModuleShardingPlan,
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> ShardedModule[Any, Any, Any, Any]:
"""
Does the actual sharding. It will allocate parameters on the requested locations
as specified by corresponding ParameterSharding.
Default implementation is data-parallel replication.
Args:
module (M): module to shard.
params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
(module path + parameter name, '.'-separated) to its sharding spec.
env (ShardingEnv): sharding environment that has the process group.
device (torch.device): compute device.
Returns:
ShardedModule[Any, Any, Any]: sharded module implementation.
"""
...
def module_type(self) -> Type[M]: ...
def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
return self._qcomm_codecs_registry
def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
"""
List of parameters that can be sharded.
"""
return dict(module.named_parameters())
def sharding_types(self, compute_device_type: str) -> List[str]:
"""
List of supported sharding types. See `ShardingType` for well-known examples.
"""
return [ShardingType.DATA_PARALLEL.value]
def compute_kernels(
self, sharding_type: str, compute_device_type: str
) -> List[str]:
"""
List of supported compute kernels for a given sharding type and compute device.
"""
return [ComputeKernel.DEFAULT.value]
def storage_usage(
self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
) -> Dict[str, int]:
"""
List of system resources and corresponding usage given a compute device and
compute kernel.
"""
assert compute_device_type in {"cuda", "cpu", "mtia"}
storage_map = {
"cuda": ParameterStorage.HBM,
"cpu": ParameterStorage.DDR,
# TODO: Update it later. Setting for MTIA is same as CPU's for now.
"mtia": ParameterStorage.DDR,
}
return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
def _get_batch_inputs_and_shardable_parameters(
    module: nn.Module,
    sharders: List[ModuleSharder[nn.Module]],
    batch_size: int,
    constraints: Optional[Dict[str, ParameterConstraints]] = None,
) -> Tuple[List[float], Set[nn.Module]]:
    """
    Walk the module tree and collect per-feature input sizes plus the set of
    shardable submodules.

    A submodule is shardable when a sharder in ``sharders`` matches its type;
    recursion stops at the first matching module (its children are not
    visited).  For each shardable parameter, one entry per pooling factor is
    produced; per-feature pooling factors and batch sizes come from
    ``constraints`` when available, otherwise defaults (POOLING_FACTOR,
    ``batch_size``) are used.

    Returns:
        A tuple of (batch_inputs, shardable_modules) where ``batch_inputs``
        holds ``pooling_factor * batch_size`` per feature.
    """
    # Map sharder-key (fully qualified module type name) -> sharder instance.
    sharder_map: Dict[str, ModuleSharder[nn.Module]] = {
        sharder_name(sharder.module_type): sharder for sharder in sharders
    }
    input_lengths: List[float] = []
    batch_sizes: List[int] = []
    shardable_modules: Set[nn.Module] = set()

    def populate_shardable_modules(
        module: nn.Module,
    ) -> None:
        # Depth-first search: recurse until a module type with a registered
        # sharder is found, then record its shardable parameters.
        sharder_key = sharder_name(type(module))
        sharder = sharder_map.get(sharder_key)

        if not sharder:
            for _child_name, child in module.named_children():
                populate_shardable_modules(child)
        else:
            names = sharder.shardable_parameters(module).keys()
            shardable_modules.add(module)

            for name in names:
                # Per-feature pooling factors from constraints, else default.
                pooling_factors = (
                    constraints[name].pooling_factors
                    if constraints and constraints.get(name)
                    else [POOLING_FACTOR]
                )
                input_lengths.extend(pooling_factors)
                # Per-feature batch sizes must align 1:1 with pooling factors;
                # fall back to the global batch_size when not constrained.
                batch_sizes.extend(
                    constraints[name].batch_sizes  # pyre-ignore[6]
                    if constraints
                    and constraints.get(name)
                    and constraints[name].batch_sizes
                    else [batch_size] * len(pooling_factors)
                )

    populate_shardable_modules(module)
    # Expected input size per feature = pooling factor * its batch size.
    batch_inputs: List[float] = [
        input_length * batch_size
        for input_length, batch_size in zip(input_lengths, batch_sizes)
    ]

    return batch_inputs, shardable_modules
9,054 | import math
import operator
from functools import reduce
from typing import Any, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
import torch
from torchrec.distributed.planner.types import Perf, ShardingOption, Storage
from torchrec.distributed.types import ShardingType
def bytes_to_mb(num_bytes: Union[float, int]) -> float:
    """Convert a byte count to mebibytes (MiB)."""
    bytes_per_mb = 1024 * 1024
    return num_bytes / bytes_per_mb
9,055 | import math
import operator
from functools import reduce
from typing import Any, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
import torch
from torchrec.distributed.planner.types import Perf, ShardingOption, Storage
from torchrec.distributed.types import ShardingType
def gb_to_bytes(gb: float) -> int:
    """Convert gibibytes to an integer byte count (truncating toward zero)."""
    bytes_per_gb = 1024 * 1024 * 1024
    return int(gb * bytes_per_gb)
9,056 | import math
import operator
from functools import reduce
from typing import Any, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
import torch
from torchrec.distributed.planner.types import Perf, ShardingOption, Storage
from torchrec.distributed.types import ShardingType
def bytes_to_gb(num_bytes: int) -> float:
    """Convert a byte count to gibibytes (GiB)."""
    bytes_per_gb = 1024 * 1024 * 1024
    return num_bytes / bytes_per_gb
class Storage:
"""
Representation of the storage capacities of a hardware used in training.
"""
hbm: int
ddr: int
def __add__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm + other.hbm,
ddr=self.ddr + other.ddr,
)
def __sub__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm - other.hbm,
ddr=self.ddr - other.ddr,
)
def __hash__(self) -> int:
return hash((self.hbm, self.ddr))
def fits_in(self, other: "Storage") -> bool:
return self.hbm <= other.hbm and self.ddr <= other.ddr
def storage_repr_in_gb(storage: Optional[Storage]) -> str:
    """Render a Storage as a human-readable GB string; empty string for None."""
    if storage is None:
        return ""
    hbm_gb = round(bytes_to_gb(storage.hbm), 3)
    ddr_gb = round(bytes_to_gb(storage.ddr), 3)
    return f"Storage(hbm = {hbm_gb} GB, " f"ddr = {ddr_gb} GB)"
9,057 | import math
import operator
from functools import reduce
from typing import Any, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
import torch
from torchrec.distributed.planner.types import Perf, ShardingOption, Storage
from torchrec.distributed.types import ShardingType
class ShardingOption:
"""
One way of sharding an embedding table.
"""
def __init__(
self,
name: str,
tensor: torch.Tensor,
module: Tuple[str, nn.Module],
input_lengths: List[float],
batch_size: int,
sharding_type: str,
partition_by: str,
compute_kernel: str,
shards: List[Shard],
cache_params: Optional[CacheParams] = None,
enforce_hbm: Optional[bool] = None,
stochastic_rounding: Optional[bool] = None,
bounds_check_mode: Optional[BoundsCheckMode] = None,
dependency: Optional[str] = None,
is_pooled: Optional[bool] = None,
feature_names: Optional[List[str]] = None,
) -> None:
self.name = name
self._tensor = tensor
self._module = module
self.input_lengths = input_lengths
self.batch_size = batch_size
self.sharding_type = sharding_type
self.partition_by = partition_by
self.compute_kernel = compute_kernel
# relevant to planner output, must be populated if sharding option
# part of final solution
self.shards = shards
self.cache_params = cache_params
self.enforce_hbm = enforce_hbm
self.stochastic_rounding = stochastic_rounding
self.bounds_check_mode = bounds_check_mode
self.dependency = dependency
self._is_pooled = is_pooled
self.is_weighted: Optional[bool] = None
self.feature_names: Optional[List[str]] = feature_names
def tensor(self) -> torch.Tensor:
return self._tensor
def module(self) -> Tuple[str, nn.Module]:
return self._module
def fqn(self) -> str:
return self.module[0] + "." + self.name
def cache_load_factor(self) -> Optional[float]:
if self.cache_params is not None:
return self.cache_params.load_factor
return None
def path(self) -> str:
return self.module[0]
def num_shards(self) -> int:
return len(self.shards)
def num_inputs(self) -> int:
return len(self.input_lengths)
def total_storage(self) -> Storage:
storage: Storage = Storage(hbm=0, ddr=0)
for shard in self.shards:
storage += cast(Storage, shard.storage)
return storage
def total_perf(self) -> float:
perf: float = 0
for shard in self.shards:
# pyre-ignore: Undefined attribute [16]
perf += shard.perf.total
return perf
def is_pooled(self) -> bool:
if self._is_pooled is None:
self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
return self._is_pooled
def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
"""Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
if isinstance(module, EmbeddingCollectionInterface) or isinstance(
module, ManagedCollisionEmbeddingCollection
):
return False
for submodule in module.modules():
if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
submodule, ManagedCollisionEmbeddingCollection
):
for name, _ in submodule.named_parameters():
if sharding_option_name in name:
return False
return True
def __hash__(self) -> int:
return hash(
(
self.fqn,
self.sharding_type,
self.compute_kernel,
tuple(self.shards),
self.cache_params,
)
)
def __deepcopy__(
self, memo: Optional[Dict[int, "ShardingOption"]]
) -> "ShardingOption":
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if k in ["_tensor", "_module"]:
setattr(result, k, v)
else:
setattr(result, k, deepcopy(v, memo))
return result
def reset_shard_rank(proposal: List[ShardingOption]) -> None:
    """Clear the rank assignment on every shard of every sharding option in ``proposal``."""
    for option in proposal:
        for shard in option.shards:
            shard.rank = None
9,058 | import math
import operator
from functools import reduce
from typing import Any, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
import torch
from torchrec.distributed.planner.types import Perf, ShardingOption, Storage
from torchrec.distributed.types import ShardingType
class Perf:
"""
Representation of the breakdown of the perf estimate a single shard of an
embedding table.
"""
fwd_compute: float
fwd_comms: float
bwd_compute: float
bwd_comms: float
prefetch_compute: float = 0.0
def total(self) -> float:
# When using embedding offload, there is a prefetch compute component. This
# prefetch can overlap with fwd_compute + fwd_comm and dense fwd (some of it
# overlaps with fwd_compute) and dense bwd. (fwd_compute and bwd_compute are
# embedding fwd/bwd, nothing to do with dense). Only when prefetch is longer
# than fwd_compute + dense_fwd + dense_bwd it will block bwd_compute. However,
# we don't have an effective way to estimate dense fwd/bwd at this point, so our
# cost model is too simplistic. Instead prefetch is always considered blocking.
#
# Also note, measuring prefetch blocking can only be done after partitioning,
# here are only have the per shard estimates.
#
# However adding a per-shard prefetch component to the cost model does have the
# benefit that 1) it enables the ScaleupProposer to explore the trade off
# between increasing cache sizes vs more difficult bin-packing constraints. 2)
# it helps balance the prefetch compute across the ranks.
return (
self.fwd_compute
+ self.bwd_compute
+ self.fwd_comms
+ self.bwd_comms
+ self.prefetch_compute
)
def __add__(self, other: "Perf") -> "Perf":
return Perf(
fwd_compute=self.fwd_compute + other.fwd_compute,
fwd_comms=self.fwd_comms + other.fwd_comms,
bwd_compute=self.bwd_compute + other.bwd_compute,
bwd_comms=self.bwd_comms + other.bwd_comms,
prefetch_compute=self.prefetch_compute + other.prefetch_compute,
)
def __hash__(self) -> int:
return hash(
(
self.fwd_compute,
self.fwd_comms,
self.bwd_compute,
self.bwd_comms,
self.prefetch_compute,
)
)
class Storage:
"""
Representation of the storage capacities of a hardware used in training.
"""
hbm: int
ddr: int
def __add__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm + other.hbm,
ddr=self.ddr + other.ddr,
)
def __sub__(self, other: "Storage") -> "Storage":
return Storage(
hbm=self.hbm - other.hbm,
ddr=self.ddr - other.ddr,
)
def __hash__(self) -> int:
return hash((self.hbm, self.ddr))
def fits_in(self, other: "Storage") -> bool:
return self.hbm <= other.hbm and self.ddr <= other.ddr
class ShardingOption:
"""
One way of sharding an embedding table.
"""
def __init__(
self,
name: str,
tensor: torch.Tensor,
module: Tuple[str, nn.Module],
input_lengths: List[float],
batch_size: int,
sharding_type: str,
partition_by: str,
compute_kernel: str,
shards: List[Shard],
cache_params: Optional[CacheParams] = None,
enforce_hbm: Optional[bool] = None,
stochastic_rounding: Optional[bool] = None,
bounds_check_mode: Optional[BoundsCheckMode] = None,
dependency: Optional[str] = None,
is_pooled: Optional[bool] = None,
feature_names: Optional[List[str]] = None,
) -> None:
self.name = name
self._tensor = tensor
self._module = module
self.input_lengths = input_lengths
self.batch_size = batch_size
self.sharding_type = sharding_type
self.partition_by = partition_by
self.compute_kernel = compute_kernel
# relevant to planner output, must be populated if sharding option
# part of final solution
self.shards = shards
self.cache_params = cache_params
self.enforce_hbm = enforce_hbm
self.stochastic_rounding = stochastic_rounding
self.bounds_check_mode = bounds_check_mode
self.dependency = dependency
self._is_pooled = is_pooled
self.is_weighted: Optional[bool] = None
self.feature_names: Optional[List[str]] = feature_names
def tensor(self) -> torch.Tensor:
return self._tensor
def module(self) -> Tuple[str, nn.Module]:
return self._module
def fqn(self) -> str:
return self.module[0] + "." + self.name
def cache_load_factor(self) -> Optional[float]:
if self.cache_params is not None:
return self.cache_params.load_factor
return None
def path(self) -> str:
return self.module[0]
def num_shards(self) -> int:
return len(self.shards)
def num_inputs(self) -> int:
return len(self.input_lengths)
def total_storage(self) -> Storage:
storage: Storage = Storage(hbm=0, ddr=0)
for shard in self.shards:
storage += cast(Storage, shard.storage)
return storage
def total_perf(self) -> float:
perf: float = 0
for shard in self.shards:
# pyre-ignore: Undefined attribute [16]
perf += shard.perf.total
return perf
def is_pooled(self) -> bool:
if self._is_pooled is None:
self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
return self._is_pooled
def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
"""Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
if isinstance(module, EmbeddingCollectionInterface) or isinstance(
module, ManagedCollisionEmbeddingCollection
):
return False
for submodule in module.modules():
if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
submodule, ManagedCollisionEmbeddingCollection
):
for name, _ in submodule.named_parameters():
if sharding_option_name in name:
return False
return True
def __hash__(self) -> int:
return hash(
(
self.fqn,
self.sharding_type,
self.compute_kernel,
tuple(self.shards),
self.cache_params,
)
)
def __deepcopy__(
self, memo: Optional[Dict[int, "ShardingOption"]]
) -> "ShardingOption":
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if k in ["_tensor", "_module"]:
setattr(result, k, v)
else:
setattr(result, k, deepcopy(v, memo))
return result
class ShardingType(Enum):
"""
Well-known sharding types, used by inter-module optimizations.
"""
# Replicated on all ranks
DATA_PARALLEL = "data_parallel"
# Placed on a single rank
TABLE_WISE = "table_wise"
# Placed on multiple ranks as different sharded tables
COLUMN_WISE = "column_wise"
# Range-split on the first dimension across all ranks
ROW_WISE = "row_wise"
# Row-wise on the same node and table-wise across nodes
# Useful when having multiple ranks per node
# and comms within a single node are more efficient than across nodes.
TABLE_ROW_WISE = "table_row_wise"
# Column-wise on the same node and table-wise across nodes
TABLE_COLUMN_WISE = "table_column_wise"
The provided code snippet includes necessary dependencies for implementing the `_find_imbalance_tables` function. Write a Python function `def _find_imbalance_tables( sharding_options: List[ShardingOption], target_imbalance: str = "perf" ) -> List[ShardingOption]` to solve the following problem:
Find the tables that are causing the imbalance, and return their names.
Here is the function:
def _find_imbalance_tables(
    sharding_options: List[ShardingOption], target_imbalance: str = "perf"
) -> List[ShardingOption]:
    """
    Find the sharding options (tables) that contribute to the most-loaded
    rank(s), sorted from largest to smallest contribution.

    Args:
        sharding_options: placed options; every shard must have a rank
            assigned and its ``perf``/``storage`` estimate populated.
        target_imbalance: metric to balance on — "perf" (total shard perf)
            or "hbm" (HBM bytes).

    Returns:
        Options whose shard ranks cover all max-loaded ranks, excluding
        DATA_PARALLEL and ROW_WISE tables (replicated / evenly split, so
        re-planning them cannot relieve a hot rank).  Empty for world size 1.

    Raises:
        ValueError: if ``target_imbalance`` is not "perf" or "hbm".
    """
    rank_to_target_stats: Dict[int, float] = {}
    # populate rank_to_target_stats: accumulate the chosen metric per rank
    for sharding_option in sharding_options:
        for shard in sharding_option.shards:
            rank = cast(int, shard.rank)
            if rank not in rank_to_target_stats:
                rank_to_target_stats[rank] = 0
            if target_imbalance == "perf":
                rank_to_target_stats[rank] += cast(Perf, shard.perf).total
            elif target_imbalance == "hbm":
                rank_to_target_stats[rank] += cast(Storage, shard.storage).hbm
            else:
                raise ValueError(f"Unknown target imbalance {target_imbalance}")

    if len(rank_to_target_stats.values()) <= 1:
        # world_size is 1: a single rank cannot be imbalanced
        return []

    max_value = max(rank_to_target_stats.values())
    max_value_ranks = {
        rank for rank, value in rank_to_target_stats.items() if value == max_value
    }

    # find tables whose shard ranks cover every max-loaded rank; skip
    # sharding types that cannot be moved to relieve the hot rank(s)
    tables_in_max_value_ranks: List[ShardingOption] = []
    for sharding_option in sharding_options:
        sharding_option_ranks = [shard.rank for shard in sharding_option.shards]
        if set(
            sharding_option_ranks
        ) >= max_value_ranks and sharding_option.sharding_type not in [
            ShardingType.DATA_PARALLEL.value,
            ShardingType.ROW_WISE.value,
        ]:
            tables_in_max_value_ranks.append(sharding_option)

    if target_imbalance == "perf":
        # sort tables by total perf from largest to smallest
        tables_in_max_value_ranks.sort(
            key=lambda sharding_option: sharding_option.shards[0].perf.total,
            reverse=True,
        )
    elif target_imbalance == "hbm":
        # sort tables by hbm from largest to smallest
        tables_in_max_value_ranks.sort(
            key=lambda sharding_option: sharding_option.shards[0].storage.hbm,
            reverse=True,
        )
    else:
        raise ValueError(f"Unknown target imbalance {target_imbalance}")

    return tables_in_max_value_ranks
9,059 | import copy
from functools import reduce
from time import perf_counter
from typing import cast, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from torchrec.distributed.collective_utils import invoke_on_rank_and_broadcast_result
from torchrec.distributed.comm import get_local_size
from torchrec.distributed.planner.constants import BATCH_SIZE, MAX_SIZE
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.partitioners import GreedyPerfPartitioner
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.proposers import (
GreedyProposer,
GridSearchProposer,
UniformProposer,
)
from torchrec.distributed.planner.stats import EmbeddingStats
from torchrec.distributed.planner.storage_reservations import (
HeuristicalStorageReservation,
)
from torchrec.distributed.planner.types import (
Enumerator,
ParameterConstraints,
Partitioner,
PerfModel,
PlannerError,
PlannerErrorType,
Proposer,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
bytes_to_gb,
reset_shard_rank,
storage_repr_in_gb,
)
from torchrec.distributed.sharding_plan import get_default_sharders, placement
from torchrec.distributed.types import (
EmbeddingModuleShardingPlan,
EnumerableShardingSpec,
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingPlanner,
ShardingType,
ShardMetadata,
)
class Topology:
def __init__(
self,
world_size: int,
compute_device: str,
hbm_cap: Optional[int] = None,
ddr_cap: Optional[int] = None,
local_world_size: Optional[int] = None,
hbm_mem_bw: float = HBM_MEM_BW,
ddr_mem_bw: float = DDR_MEM_BW,
intra_host_bw: float = INTRA_NODE_BANDWIDTH,
inter_host_bw: float = CROSS_NODE_BANDWIDTH,
bwd_compute_multiplier: float = BWD_COMPUTE_MULTIPLIER,
) -> None:
"""
Representation of a network of devices in a cluster.
"""
# validate input
assert compute_device in [
"cpu",
"cuda",
"mtia",
], f"unsupported compute device {compute_device}"
self._compute_device = compute_device
self._world_size = world_size
hbm_per_device = 0
if self._compute_device == "cuda":
hbm_per_device = hbm_cap if hbm_cap else HBM_CAP
ddr_cap = ddr_cap if ddr_cap else DDR_CAP
self._devices: List[DeviceHardware] = []
for rank in range(world_size):
self._devices.append(
DeviceHardware(
rank=rank,
storage=Storage(hbm=hbm_per_device, ddr=ddr_cap),
perf=Perf(fwd_compute=0, fwd_comms=0, bwd_compute=0, bwd_comms=0),
)
)
self._local_world_size: int = (
local_world_size if local_world_size else world_size
)
self._hbm_mem_bw = hbm_mem_bw
self._ddr_mem_bw = ddr_mem_bw
self._intra_host_bw = intra_host_bw
self._inter_host_bw = inter_host_bw
self._bwd_compute_multiplier = bwd_compute_multiplier
def compute_device(self) -> str:
return self._compute_device
def devices(self) -> List[DeviceHardware]:
return self._devices
def world_size(self) -> int:
return self._world_size
def local_world_size(self) -> int:
return self._local_world_size
def hbm_mem_bw(self) -> float:
return self._hbm_mem_bw
def ddr_mem_bw(self) -> float:
return self._ddr_mem_bw
def intra_host_bw(self) -> float:
return self._intra_host_bw
def inter_host_bw(self) -> float:
return self._inter_host_bw
def bwd_compute_multiplier(self) -> float:
return self._bwd_compute_multiplier
def __repr__(self) -> str:
topology_repr: str = f"world_size={self._world_size} \n"
topology_repr += f"compute_device={self._compute_device}\n"
topology_repr += "devices=\n"
for idx, device in enumerate(self._devices):
topology_repr += f"\tdevice {idx} {device}\n"
topology_repr += f"local_world_size={self._local_world_size} \n"
topology_repr += f"intra_host_bw={self._intra_host_bw} \n"
topology_repr += f"inter_host_bw={self._inter_host_bw} \n"
return topology_repr
class ShardingOption:
    """
    One way of sharding an embedding table.

    Bundles the table's metadata (tensor, owning module, input stats) with one
    candidate sharding decision (type, partitioning, compute kernel, shards)
    so the planner can score and compare alternatives.

    NOTE(review): the accessor methods below are decorated with ``@property``
    because the rest of this file consumes them as attributes (``self.fqn`` in
    ``__hash__``, ``self.module[0]`` in ``fqn``/``path``, and
    ``sharding_option.path`` / ``.name`` in ``_to_sharding_plan``); as plain
    methods those expressions would index/hash bound methods.
    """

    def __init__(
        self,
        name: str,
        tensor: torch.Tensor,
        module: Tuple[str, nn.Module],
        input_lengths: List[float],
        batch_size: int,
        sharding_type: str,
        partition_by: str,
        compute_kernel: str,
        shards: List[Shard],
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
        dependency: Optional[str] = None,
        is_pooled: Optional[bool] = None,
        feature_names: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        self._tensor = tensor
        self._module = module
        self.input_lengths = input_lengths
        self.batch_size = batch_size
        self.sharding_type = sharding_type
        self.partition_by = partition_by
        self.compute_kernel = compute_kernel
        # relevant to planner output, must be populated if sharding option
        # part of final solution
        self.shards = shards
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
        self.dependency = dependency
        # None means "not yet computed"; resolved lazily by `is_pooled`.
        self._is_pooled = is_pooled
        self.is_weighted: Optional[bool] = None
        self.feature_names: Optional[List[str]] = feature_names

    @property
    def tensor(self) -> torch.Tensor:
        """Unsharded parameter tensor this option describes."""
        return self._tensor

    @property
    def module(self) -> Tuple[str, nn.Module]:
        """(module path, module) pair that owns this parameter."""
        return self._module

    @property
    def fqn(self) -> str:
        """Fully-qualified name: '<module path>.<param name>'."""
        return self.module[0] + "." + self.name

    @property
    def cache_load_factor(self) -> Optional[float]:
        """Cache load factor from `cache_params`, or None when uncached."""
        if self.cache_params is not None:
            return self.cache_params.load_factor
        return None

    @property
    def path(self) -> str:
        """Module path prefix (first element of `module`)."""
        return self.module[0]

    @property
    def num_shards(self) -> int:
        """Number of shards in this option."""
        return len(self.shards)

    @property
    def num_inputs(self) -> int:
        """Number of input features (one entry per input length)."""
        return len(self.input_lengths)

    @property
    def total_storage(self) -> Storage:
        """Sum of per-shard storage (HBM + DDR) across all shards."""
        storage: Storage = Storage(hbm=0, ddr=0)
        for shard in self.shards:
            storage += cast(Storage, shard.storage)
        return storage

    @property
    def total_perf(self) -> float:
        """Sum of per-shard total perf estimates across all shards."""
        perf: float = 0
        for shard in self.shards:
            # pyre-ignore: Undefined attribute [16]
            perf += shard.perf.total
        return perf

    @property
    def is_pooled(self) -> bool:
        """Whether the owning module pools its output; computed lazily and cached."""
        if self._is_pooled is None:
            self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
        return self._is_pooled

    @staticmethod
    def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
        """Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
        if isinstance(module, EmbeddingCollectionInterface) or isinstance(
            module, ManagedCollisionEmbeddingCollection
        ):
            return False
        # Otherwise scan submodules: if the parameter lives inside a
        # (managed-collision) embedding collection, output is unpooled.
        for submodule in module.modules():
            if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
                submodule, ManagedCollisionEmbeddingCollection
            ):
                for name, _ in submodule.named_parameters():
                    if sharding_option_name in name:
                        return False
        return True

    def __hash__(self) -> int:
        # Identity of an option: which param, how it is sharded, and with what
        # kernel/cache configuration.
        return hash(
            (
                self.fqn,
                self.sharding_type,
                self.compute_kernel,
                tuple(self.shards),
                self.cache_params,
            )
        )

    def __deepcopy__(
        self, memo: Optional[Dict[int, "ShardingOption"]]
    ) -> "ShardingOption":
        # Keep `_tensor` and `_module` shallow (they can be large / shared);
        # deep-copy everything else.
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            if k in ["_tensor", "_module"]:
                setattr(result, k, v)
            else:
                setattr(result, k, deepcopy(v, memo))
        return result
def placement(
    compute_device: str,
    rank: int,
    local_size: int,
) -> str:
    """Return the placement string "rank:<rank>/<device>" for a shard.

    For accelerator devices ("cuda"/"mtia") the device index is the rank's
    position within its host (rank modulo local_size); "cpu" stays index-free.
    """
    if compute_device in {"cuda", "mtia"}:
        device = torch.device(compute_device, rank % local_size)
    else:
        device = compute_device
    return f"rank:{rank}/{device}"
class ShardingType(Enum):
    """
    Well-known sharding types, used by inter-module optimizations.

    Values are lowercase strings so they can be compared against the plain
    `sharding_type` strings carried by ShardingOption / ParameterSharding
    (e.g. `sharding_type == ShardingType.DATA_PARALLEL.value`).
    """
    # Replicated on all ranks
    DATA_PARALLEL = "data_parallel"
    # Placed on a single rank
    TABLE_WISE = "table_wise"
    # Placed on multiple ranks as different sharded tables
    COLUMN_WISE = "column_wise"
    # Range-split on the first dimension across all ranks
    ROW_WISE = "row_wise"
    # Row-wise on the same node and table-wise across nodes
    # Useful when having multiple ranks per node
    # and comms within a single node are more efficient than across nodes.
    TABLE_ROW_WISE = "table_row_wise"
    # Column-wise on the same node and table-wise across nodes
    TABLE_COLUMN_WISE = "table_column_wise"
class ParameterSharding:
    """
    Describes the sharding of the parameter.

    Args:
        sharding_type (str): how this parameter is sharded. See ShardingType for
            well-known types.
        compute_kernel (str): compute kernel to be used by this parameter.
        ranks (Optional[List[int]]): rank of each shard.
        sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each shard.
        cache_params (Optional[CacheParams]): cache params for embedding lookup.
        enforce_hbm (Optional[bool]): whether to use HBM.
        stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
        bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.

    NOTE:
        ShardingType.TABLE_WISE - rank where this embedding is placed
        ShardingType.COLUMN_WISE - rank where the embedding shards are placed, seen as
        individual tables
        ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
        ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
    """

    # Explicit __init__ added: `_to_sharding_plan` constructs this class with
    # keyword arguments, but bare annotated class attributes alone provide no
    # initializer (presumably a @dataclass decorator lost in extraction —
    # this initializer reproduces the same field order and defaults).
    def __init__(
        self,
        sharding_type: str,
        compute_kernel: str,
        ranks: Optional[List[int]] = None,
        sharding_spec: Optional[ShardingSpec] = None,
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
    ) -> None:
        self.sharding_type = sharding_type
        self.compute_kernel = compute_kernel
        self.ranks = ranks
        self.sharding_spec = sharding_spec
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
class EmbeddingModuleShardingPlan(ModuleShardingPlan, Dict[str, ParameterSharding]):
    """
    Map of ParameterSharding per parameter (usually a table). This describes the
    sharding plan for a torchrec module (e.g. `EmbeddingBagCollection`).
    """

    def __str__(self) -> str:
        """Render two tables: per-param sharding choices, then per-shard layout."""
        param_rows = []
        shard_rows = []
        for table_name, sharding in self.items():
            param_rows.append(
                [
                    table_name,
                    sharding.sharding_type,
                    sharding.compute_kernel,
                    sharding.ranks,
                ]
            )
            spec = sharding.sharding_spec
            if isinstance(spec, EnumerableShardingSpec) and spec.shards is not None:
                for shard_meta in spec.shards:
                    shard_rows.append(
                        [
                            table_name,
                            shard_meta.shard_offsets,
                            shard_meta.shard_sizes,
                            shard_meta.placement,
                        ]
                    )
        rendered = "\n\n" + _tabulate(
            param_rows, ["param", "sharding type", "compute kernel", "ranks"]
        )
        rendered += "\n\n" + _tabulate(
            shard_rows, ["param", "shard offsets", "shard sizes", "placement"]
        )
        return rendered
class ShardingPlan:
    """
    Representation of sharding plan. This uses the FQN of the larger wrapped model (i.e the model that is wrapped using `DistributedModelParallel`)
    EmbeddingModuleShardingPlan should be used when TorchRec composability is desired.

    Attributes:
        plan (Dict[str, EmbeddingModuleShardingPlan]): dict keyed by module path of
            dict of parameter sharding specs keyed by parameter name.
    """

    # Explicit __init__ added: `_to_sharding_plan` returns `ShardingPlan(plan)`,
    # but a bare annotated class attribute provides no initializer (presumably
    # a @dataclass decorator lost in extraction).
    def __init__(self, plan: Dict[str, ModuleShardingPlan]) -> None:
        self.plan = plan

    def get_plan_for_module(self, module_path: str) -> Optional[ModuleShardingPlan]:
        """
        Args:
            module_path (str):

        Returns:
            Optional[ModuleShardingPlan]: dict of parameter sharding specs keyed by parameter name. None if sharding specs do not exist for given module_path.
        """
        return self.plan.get(module_path, None)

    def __str__(self) -> str:
        # "module: <path><plan>" segments separated by blank lines.
        out = ""
        for i, (module_path, module_plan) in enumerate(self.plan.items()):
            if i > 0:
                out += "\n\n"
            out += "module: " + module_path
            out += str(module_plan)
        return out
def _to_sharding_plan(
    sharding_options: List[ShardingOption],
    topology: Topology,
) -> ShardingPlan:
    """
    Materialize finalized planner ShardingOptions into a ShardingPlan.

    Args:
        sharding_options: chosen options; each option's `shards` must already
            carry size/offset/rank for every shard.
        topology: cluster description used to resolve shard placements.

    Returns:
        ShardingPlan keyed by module path, each value an
        EmbeddingModuleShardingPlan keyed by parameter name.
    """
    # NOTE(review): attribute-style access assumes Topology exposes
    # compute_device / local_world_size as properties — confirm decorators.
    compute_device = topology.compute_device
    local_size = topology.local_world_size

    plan = {}
    for sharding_option in sharding_options:
        shards = sharding_option.shards
        sharding_type = sharding_option.sharding_type

        # Accumulate all params of the same module into one module-level plan.
        module_plan = plan.get(sharding_option.path, EmbeddingModuleShardingPlan())
        module_plan[sharding_option.name] = ParameterSharding(
            sharding_spec=(
                # Data-parallel replication carries no per-shard metadata.
                None
                if sharding_type == ShardingType.DATA_PARALLEL.value
                else EnumerableShardingSpec(
                    [
                        ShardMetadata(
                            shard_sizes=shard.size,
                            shard_offsets=shard.offset,
                            # shard.rank is assumed populated by the planner
                            # at this point (hence the cast from Optional).
                            placement=placement(
                                compute_device, cast(int, shard.rank), local_size
                            ),
                        )
                        for shard in shards
                    ]
                )
            ),
            sharding_type=sharding_type,
            compute_kernel=sharding_option.compute_kernel,
            ranks=[cast(int, shard.rank) for shard in shards],
            cache_params=sharding_option.cache_params,
            enforce_hbm=sharding_option.enforce_hbm,
            stochastic_rounding=sharding_option.stochastic_rounding,
            bounds_check_mode=sharding_option.bounds_check_mode,
        )
        plan[sharding_option.path] = module_plan
    return ShardingPlan(plan)
9,060 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
def _collapse_consecutive_ranks(ranks: List[int]) -> List[str]:
if len(ranks) > 1 and ranks == list(range(min(ranks), max(ranks) + 1)):
return [f"{min(ranks)}-{max(ranks)}"]
else:
return [str(rank) for rank in ranks]
def _generate_max_text(perfs: List[float]) -> str:
    """Format the peak per-rank perf, e.g. "1.234 ms on ranks 0-3"."""
    peak = max(perfs)
    peak_indices = [idx for idx, perf in enumerate(perfs) if perf == peak]
    label = "ranks" if len(peak_indices) > 1 else "rank"
    collapsed = _collapse_consecutive_ranks(peak_indices)
    return f"{round(peak, 3)} ms on {label} {','.join(collapsed)}"
9,061 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
# NOTE: a stray empty `class ShardingType(Enum):` stub preceded this helper
# (extraction residue); it would have shadowed the real ShardingType enum
# defined earlier in this file, so it has been removed.
def _get_sharding_type_abbr(sharding_type: str) -> str:
    """Map a sharding type string to its short display abbreviation.

    Raises:
        ValueError: if `sharding_type` is not a recognized ShardingType value.
    """
    abbreviations = {
        ShardingType.DATA_PARALLEL.value: "DP",
        ShardingType.TABLE_WISE.value: "TW",
        ShardingType.COLUMN_WISE.value: "CW",
        ShardingType.ROW_WISE.value: "RW",
        ShardingType.TABLE_ROW_WISE.value: "TWRW",
        ShardingType.TABLE_COLUMN_WISE.value: "TWCW",
    }
    if sharding_type in abbreviations:
        return abbreviations[sharding_type]
    raise ValueError(
        f"Unrecognized or unsupported sharding type provided: {sharding_type}"
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.